Mirror of https://github.com/Unidata/netcdf-c.git, synced 2025-04-12 18:10:24 +08:00
Significantly Improve Amazon S3 Cloud Storage Support
## S3 Related Fixes

* Add comprehensive support for specifying AWS profiles to provide access credentials.
* Parse the files "~/.aws/config" and "~/.aws/credentials" to provide credentials for the HDF5 ROS3 driver and to locate the default region.
* Add a function to obtain the currently active S3 credentials. The search rules are defined in docs/nczarr.md.
* Provide documentation for the new features.
* Modify the struct NCauth (in include/ncauth.h) to replace specific S3 credentials with a profile name.
* Add a unit test to test the operation of profile and credentials management.
* Add support for URLs of the form "s3://<bucket>/<key>"; this requires obtaining a default region.
* Allow the specification of profile and/or region in a URL of the form "#mode=nczarr,...&aws.region=...&aws.profile=..."

## Misc. Fixes

* Move the ezxml code to libdispatch so that it can be used both by DAP4 and nczarr.
* Modify nclist to provide a deep clone operation.
* Modify ncuri to provide a deep clone operation.
* Modify the .rc file format to allow the specification of a path to be tested when looking for an entry in the .rc file.
* Ensure that the NC_rcload function is called.
* Modify nchttp to support setting request headers.
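To make the new access path concrete, here is a minimal sketch of opening an NCZarr dataset through the URL forms described above using the standard netCDF-C API; the bucket, key, profile, and region values are hypothetical placeholders, not values taken from this commit.

```c
#include <stdio.h>
#include <netcdf.h>

int main(void) {
    int ncid, stat;
    /* "s3://" URL form: "example-bucket" and "some/key" are hypothetical.
       The region and credentials are resolved from ~/.aws/config,
       ~/.aws/credentials, or the fragment parameters, per the rules
       documented in docs/nczarr.md. */
    const char *url =
        "s3://example-bucket/some/key#mode=nczarr,s3&aws.profile=default&aws.region=us-east-1";

    stat = nc_open(url, NC_NOWRITE, &ncid);
    if (stat != NC_NOERR) {
        fprintf(stderr, "nc_open failed: %s\n", nc_strerror(stat));
        return 1;
    }
    /* ... read variables as usual ... */
    nc_close(ncid);
    return 0;
}
```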
This commit is contained in:
parent de403a0fe8
commit 6b69b9c52c

.github/workflows/run_tests.yml (vendored, 2 lines changed)
@@ -4,7 +4,7 @@

name: Run netCDF Tests

on: [pull_request]
on: [pull_request,push]

jobs:
@@ -1136,7 +1136,7 @@ ENDIF(ENABLE_NCZARR_S3)

IF(NOT ENABLE_S3_SDK)
IF(ENABLE_NCZARR_S3 OR ENABLE_NCZARR_S3_TESTS)
message(FATAL_ERROR "AWS S3 libraries not found; please specify option DENABLE_NCZARR_S3=NO")
message(FATAL_ERROR "S3 support library not found; please specify option DENABLE_NCZARR_S3=NO")
SET(ENABLE_NCZARR_S3 OFF CACHE BOOL "NCZARR S3 support" FORCE)
SET(ENABLE_NCZARR_S3_TESTS OFF CACHE BOOL "S3 tests" FORCE)
ENDIF()
@@ -113,7 +113,8 @@ endif

# and run. ncgen must come before ncdump, because their tests
# depend on it.
SUBDIRS = include $(H5_TEST_DIR) libdispatch libsrc $(LIBSRC4_DIR) \
$(LIBSRCP) $(LIBHDF4) $(LIBHDF5) $(OCLIB) $(DAP2) ${DAP4} ${NCPOCO} ${ZARR} liblib \
$(LIBSRCP) $(LIBHDF4) $(LIBHDF5) $(OCLIB) $(DAP2) ${DAP4} \
${NCPOCO} ${ZARR} liblib \
$(NCGEN3) $(NCGEN) $(NCDUMP) ${PLUGIN_DIR} $(TESTDIRS) docs \
$(EXAMPLES)
@@ -7,6 +7,7 @@ This file contains a high-level description of this package's evolution. Release

## 4.8.2 - TBD

* [Enhancement] Support Amazon S3 access for NCZarr. Also support use of the existing Amazon SDK credentials system. See [Github #2???](https://github.com/Unidata/netcdf-c/pull/2???)
* [Bug Fix] Ensure that internal Fortran APIs are always defined. See [Github #2098](https://github.com/Unidata/netcdf-c/pull/2098).
* [Enhancement] Support filters for NCZarr. See [Github #2101](https://github.com/Unidata/netcdf-c/pull/2101)
* [Bug Fix] Make PR 2075 long file name be idempotent. See [Github #2094](https://github.com/Unidata/netcdf-c/pull/2094).
configure.ac (24 lines changed)
@@ -642,25 +642,27 @@ AC_MSG_RESULT($enable_nczarr_s3)

# Note we check for the library after checking for enable_nczarr_s3
# because for some reason this screws up if we unconditionally test for sdk
# and it is not available. Fix someday
have_aws=no
enable_s3_sdk=no
if test "x$enable_nczarr_s3" = xyes ; then
# See if we have the s3 aws library
# Check for the AWS S3 SDK library
AC_LANG_PUSH([C++])
AC_SEARCH_LIBS([aws_allocator_is_valid],[aws-c-common aws-cpp-sdk-s3 aws-cpp-sdk-core], [have_aws=yes],[have_aws=no])
AC_SEARCH_LIBS([aws_allocator_is_valid],[aws-c-common aws-cpp-sdk-s3 aws-cpp-sdk-core], [enable_s3_sdk=yes],[enable_s3_sdk=no])
AC_LANG_POP
fi

AC_MSG_CHECKING([whether AWS S3 SDK library is available])
AC_MSG_RESULT([$have_aws])
AC_MSG_RESULT([$enable_s3_sdk])

if test "x$have_aws" = xno ; then
AC_MSG_WARN([AWS SDK not found; disabling S3 support])
if test "x$enable_s3_sdk" = xno ; then
AC_MSG_WARN([No S3 library available; disabling S3 support])
enable_nczarr_s3=no
else
fi

if test "x$enable_s3_sdk" = xyes ; then
AC_DEFINE([ENABLE_S3_SDK], [1], [If true, then S3 sdk was found])
fi
AM_CONDITIONAL(ENABLE_S3_SDK, [test "x$have_aws" = xyes])
AM_CONDITIONAL(ENABLE_S3_SDK, [test "x$enable_s3_sdk" = xyes])

# Check for enabling S3 testing
AC_MSG_CHECKING([whether netcdf zarr S3 testing should be enabled])
@@ -1265,10 +1267,10 @@ if test "x$enable_hdf5" = xyes; then

# Check for the main hdf5 and hdf5_hl library.

AC_SEARCH_LIBS([H5Fflush], [hdf5dll hdf5], [],
AC_SEARCH_LIBS([H5Fflush], [hdf5 hdf5.dll], [],
  [AC_MSG_ERROR([Can't find or link to the hdf5 library. Use --disable-hdf5, or see config.log for errors.])])
AC_SEARCH_LIBS([H5DSis_scale], [hdf5_hl hdf5_hl.dll], [],
  [AC_MSG_ERROR([Can't find or link to the hdf5 library. Use --disable-hdf5, or see config.log for errors.])])
AC_SEARCH_LIBS([H5DSis_scale], [hdf5_hldll hdf5_hl], [],
  [AC_MSG_ERROR([Can't find or link to the hdf5 high-level. Use --disable-hdf5, or see config.log for errors.])])

AC_CHECK_HEADERS([hdf5.h], [], [AC_MSG_ERROR([Compiling a test with HDF5 failed. Either hdf5.h cannot be found, or config.log should be checked for other reason.])])
@@ -1600,7 +1602,7 @@ AM_CONDITIONAL(ENABLE_BYTERANGE, [test "x$enable_byterange" = xyes])
AM_CONDITIONAL(RELAX_COORD_BOUND, [test "xyes" = xyes])
AM_CONDITIONAL(HAS_PAR_FILTERS, [test x$hdf5_supports_par_filters = xyes ])
AM_CONDITIONAL(ENABLE_NCZARR, [test "x$enable_nczarr" = xyes])
AM_CONDITIONAL(HAVE_AWS, [test "x$have_aws" = xyes])
AM_CONDITIONAL(ENABLE_S3_SDK, [test "x$enable_s3_sdk" = xyes])
AM_CONDITIONAL(HAS_MULTIFILTERS, [test "x$has_multifilters" = xyes])
AM_CONDITIONAL(ENABLE_BLOSC, [test "x$enable_blosc" = xyes])
AM_CONDITIONAL(ENABLE_SZIP, [test "x$enable_szip" = xyes])
@@ -92,6 +92,6 @@ obsolete/fan_utils.html bestpractices.md filters.md indexing.md
inmemory.md DAP2.dox attribute_conventions.md FAQ.md
file_format_specifications.md known_problems.md
COPYRIGHT.dox user_defined_formats.md DAP4.md DAP4.dox
testserver.dox byterange.dox filters.md nczarr.md)
testserver.dox byterange.dox filters.md nczarr.md auth.md)

ADD_EXTRA_DIST("${CUR_EXTRA_DIST}")
@@ -742,8 +742,9 @@ INPUT = \
@abs_top_srcdir@/docs/windows-binaries.md \
@abs_top_srcdir@/docs/attribute_conventions.md \
@abs_top_srcdir@/docs/file_format_specifications.md \
@abs_top_srcdir@/docs/byterange.dox \
@abs_top_srcdir@/docs/inmemory.md \
@abs_top_srcdir@/docs/filters.md \
@abs_top_srcdir@/docs/byterange.dox \
@abs_top_srcdir@/docs/auth.md \
@abs_top_srcdir@/docs/nczarr.md \
@abs_top_srcdir@/docs/notes.md \
@@ -751,6 +751,7 @@ INPUT = \
./docs/byterange.dox \
./docs/inmemory.md \
./docs/auth.md \
./docs/filters.md \
./docs/notes.md \
./docs/all-error-codes.md \
./docs/building-with-cmake.md \
@@ -11,7 +11,7 @@ EXTRA_DIST = netcdf.m4 DoxygenLayout.xml Doxyfile.in footer.html \
mainpage.dox tutorial.dox cdl.dox \
architecture.dox internal.dox windows-binaries.md \
building-with-cmake.md CMakeLists.txt groups.dox notes.md \
install-fortran.md all-error-codes.md credits.md auth.md \
install-fortran.md all-error-codes.md credits.md auth.md filters.md \
obsolete/fan_utils.html indexing.dox \
inmemory.md attribute_conventions.md FAQ.md \
file_format_specifications.md known_problems.md COPYRIGHT.md \
docs/auth.html (480 lines changed)
@@ -1,480 +0,0 @@
<!-- Copyright 2015, UCAR/Unidata and OPeNDAP, Inc. -->
<!-- See the COPYRIGHT file for more information. -->
<html>
<head>
<style>
.break { page-break-before: always; }
body { counter-reset: H2; font-size: 12pt; }
h1.title {
    font-size: 18pt;
    text-decoration: underline;
}
div.subtitle {
}
.subtitle h1 {
    font-size: 14pt;
    margin-top: 0;
    margin-bottom: 0;
}

h1.toc {
    font-size: 16pt;
    text-decoration: underline;
}
h1.appendix {
    font-size: 14pt;
}

h2:before {
    content: counter(H2) " ";
    counter-increment: H2;
}
h2 { counter-reset: H3; text-decoration: underline; }
h3:before {
    content: counter(H2) "." counter(H3) " ";
    counter-increment:H3;
}
h3 { counter-reset: H4; }
h4:before {
    content: counter(H2) "." counter(H3) "." counter(H4) " ";
    counter-increment:H4;
}

</style>
</head>
<body>

<h1 class="title">netCDF Authorization Support</h1>
<div class="subtitle">
<h1>Author: Dennis Heimbigner</h1>
<h1>Address: http://www.unidata.ucar.edu/staff/dmh/</h1>
<h1>Draft: 11/21/2014</h1>
<h1>Last Revised: 10/24/2015</h1>
</div>

<h1 class="toc">Table of Contents</h1>
<ol>
<li> <a href="#Introduction">Introduction</a>
<li> <a href="#URL-AUTH">URL-Based Authentication</a>
<li> <a href="#DODSRC">RC File Authentication</a>
<li> <a href="#REDIR">Redirection-Based Authentication</a>
<li> <a href="#URLCONS">URL Constrained RC File Entries</a>
<li> <a href="#CLIENTCERTS">Client-Side Certificates</a>
<li> <a href="#allkeys">Appendix A. All RC-File Keys</a>
<li> <a href="#ESGDETAIL">Appendix B. ESG Access in Detail</a>
</ol>

<h2><a name="Introduction">Introduction</a></h2>
netCDF can support user authorization using the facilities provided by the curl
library. This includes basic password authentication as well as
certificate-based authorization.
<p>
At the moment, this document only applies to DAP2 and DAP4 access
because they are (for now) the only parts of the netCDF-C library
that uses libcurl.
<p>
With some exceptions (e.g. see the section on <a href="#REDIR">redirection</a>)
The libcurl authorization mechanisms can be accessed in two ways
<ol>
<li> Inserting the username and password into the url, or
<li> Accessing information from a so-called <i>rc</i> file named either
<i>.daprc</i> or <i>.dodsrc</i>
</ol>

<h2><a name="URL-AUTH">URL-Based Authentication</a></h2>
For simple password based authentication, it is possible to
directly insert the username and the password into a url in this form.
<pre>
http://username:password@host/...
</pre>
This username and password will be used if the server asks for
authentication. Note that only simple password authentication
is supported in this format.
Specifically note that <a href="#REDIR">redirection</a> based
authorization will not work with this because the username and password
will only be used on the initial request, not the redirection

<h2><a name="DODSRC">RC File Authentication</a></h2>
The netcdf library supports an <i>rc</i> file mechanism to allow the passing
of a number of parameters to libnetcdf and libcurl.
<p>
The file must be called one of the following names:
".daprc" or ".dodsrc"
If both .daprc and .dodsrc exist, then
the .daprc file will take precedence.
<p>
The rc file is searched for first in the current directory
and then in the home directory (as defined by the HOME environment
variable).
<p>
The rc file format is a series of lines of the general form:
<pre>
[<host:port>]<key>=<value>
</pre>
where the bracket-enclosed host:port is optional and will be discussed
subsequently.
<p>
The currently defined set of authorization-related keys are as follows.
The second column is the affected curl_easy_setopt option(s), if any.
<table>
<tr><th>Key<th>Affected curl_easy_setopt Options<th>Notes
<tr><td>HTTP.COOKIEJAR<td>CURLOPT_COOKIEJAR
<tr><td>HTTP.COOKIEFILE<td>CURLOPT_COOKIEJAR<td>Alias for CURLOPT_COOKIEJAR
<tr><td>HTTP.PROXY_SERVER<td>CURLOPT_PROXY, CURLOPT_PROXYPORT, CURLOPT_PROXYUSERPWD
<tr><td>HTTP.SSL.CERTIFICATE<td>CURLOPT_SSLCERT
<tr><td>HTTP.SSL.KEY<td>CURLOPT_SSLKEY
<tr><td>HTTP.SSL.KEYPASSWORD<td>CURLOPT_KEYPASSWORD
<tr><td>HTTP.SSL.CAINFO<td>CURLOPT_SSLCAINFO
<tr><td>HTTP.SSL.CAPATH<td>CURLOPT_SSLCAPATH
<tr><td>HTTP.SSL.VERIFYPEER<td>CURLOPT_SSL_VERIFYPEER
<tr><td>HTTP.SSL.VALIDATE<td>CURLOPT_SSL_VERIFYPEER, CURLOPT_SSL_VERIFYHOST
<tr><td>HTTP.CREDENTIALS.USERPASSWORD<td>CURLOPT_USERPASSWORD
<tr><td>HTTP.NETRC<td>N.A.<td>Specify path of the .netrc file
</table>
</ul>

<h3>Password Authentication</h3>
The key
HTTP.CREDENTIALS.USERPASSWORD
can be used to set the simple password authentication.
This is an alternative to setting it in the url.
The value must be of the form "username:password".
See <a href="#REDIR">redirection authorization</a>
for important additional information.

<h3>Cookie Jar</h3>
The HTTP.COOKIEJAR key
specifies the name of file from which
to read cookies (CURLOPT_COOKIEJAR) and also
the file into which to store cookies (CURLOPT_COOKIEFILE).
The same value is used for both CURLOPT values.
It defaults to in-memory storage.
See <a href="#REDIR">redirection authorization</a>
for important additional information.

<h3>Certificate Authentication</h3>
HTTP.SSL.CERTIFICATE
specifies a file path for a file containing a PEM cerficate.
This is typically used for client-side authentication.
<p>
HTTP.SSL.KEY is essentially the same as HTTP.SSL.CERTIFICATE
and should always have the same value.
<p>
HTTP.SSL.KEYPASSWORD
specifies the password for accessing the HTTP.SSL.CERTIFICAT/HTTP.SSL.key file.
<p>
HTTP.SSL.CAPATH
specifies the path to a directory containing
trusted certificates for validating server certificates.
<p>
HTTP.SSL.VALIDATE
is a boolean (1/0) value that if true (1)
specifies that the client should verify the server's presented certificate.
<p>
HTTP.PROXY_SERVER
specifies the url for accessing the proxy:
e.g. <i>http://[username:password@]host[:port]</i>
<p>
HTTP.NETRC
specifies the absolute path of the .netrc file.
See <a href="#REDIR">redirection authorization</a>
for information about using .netrc.
<h2><a name="REDIR">Redirection-Based Authentication</a> </h2>
|
||||
Some sites provide authentication by using a third party site
|
||||
to do the authentication. Examples include ESG and URS.
|
||||
<p>
|
||||
The process is usually as follows.
|
||||
<ol>
|
||||
<li>The client contacts the server of interest (SOI), the actual data provider
|
||||
using, typically http protocol.
|
||||
<li>The SOI sends a redirect to the client to connect to the e.g. URS system
|
||||
using the 'https' protocol (note the use of https instead of http).
|
||||
<li>The client authenticates with URS.
|
||||
<li>URS sends a redirect (with authorization information) to send
|
||||
the client back to the SOI to actually obtain the data.
|
||||
</ol>
|
||||
<p>
|
||||
It turns out that libcurl uses the password in the .daprc file — or from the url —
|
||||
only for the initial connection. This causes problems because
|
||||
the redirected connection is the one that actually requires the password.
|
||||
This is where .netrc comes in. Libcurl will use .netrc for
|
||||
the redirected connection. It is possible to cause libcurl to use
|
||||
the .daprc password always, but this introduces a security hole
|
||||
because it may send the initial user+pwd to the redirection site.
|
||||
In summary, if you are using redirection, then you must create a .netrc
|
||||
file to hold the password for the site to which the redirection is sent.
|
||||
<p>
|
||||
The format of this .netrc file will contain content that
|
||||
typically look like this.
|
||||
<pre>
|
||||
machine mmmmmm login xxxxxx password yyyyyy
|
||||
</pre>
|
||||
where the machine, mmmmmm, is the hostname of the machine to
|
||||
which the client is redirected for authorization, and the
|
||||
login and password are those needed to authenticate on that machine.
|
||||
<p>
|
||||
The .netrc file can be specified by
|
||||
putting the following line in your .daprc/.dodsrc file.
|
||||
<pre>
|
||||
HTTP.NETRC=<path to netrc file>
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
One final note. In using this, it is almost certain that you will
|
||||
need to specify a real cookie jar file (HTTP.COOKIEJAR) so that the
|
||||
redirect site can pass back authorization information.
|
||||
|
||||
<h2><a name="URLCONS">URL Constrained RC File Entries</a></h2>
|
||||
Each line of the rc file can begin with
|
||||
a host+port enclosed in square brackets.
|
||||
The form is "host:port".
|
||||
If the port is not specified
|
||||
then the form is just "host".
|
||||
The reason that more of the url is not used is that
|
||||
libcurl's authorization grain is not any finer than host level.
|
||||
<p>
|
||||
Examples.
|
||||
<pre>
|
||||
[remotetest.unidata.ucar.edu]HTTP.VERBOSE=1
|
||||
or
|
||||
[fake.ucar.edu:9090]HTTP.VERBOSE=0
|
||||
</pre>
|
||||
If the url request from, say, the <i>netcdf_open</i> method
|
||||
has a host+port matching one of the prefixes in the rc file, then
|
||||
the corresponding entry will be used, otherwise ignored.
|
||||
<p>
|
||||
For example, the URL
|
||||
<pre>
|
||||
http://remotetest.unidata.ucar.edu/thredds/dodsC/testdata/testData.nc
|
||||
</pre>
|
||||
will have HTTP.VERBOSE set to 1.
|
||||
<p>
|
||||
Similarly,
|
||||
<pre>
|
||||
http://fake.ucar.edu:9090/dts/test.01
|
||||
</pre>
|
||||
will have HTTP.VERBOSE set to 0.
|
||||
|
||||
<h2><a name="CLIENTCERTS">Client-Side Certificates</a></h2>
|
||||
Some systems, notably ESG (Earth System Grid), requires
|
||||
the use of client-side certificates, as well as being
|
||||
<a href="#REDIR">re-direction based</a>.
|
||||
This requires setting the following entries:
|
||||
<ul>
|
||||
<li>HTTP.COOKIEJAR — a file path for storing cookies across re-direction.
|
||||
<li>HTTP.NETRC — the path to the netrc file.
|
||||
<li>HTTP.SSL.CERTIFICATE — the file path for the client side certificate file.
|
||||
<li>HTTP.SSL.KEY — this should have the same value as HTTP.SSL.CERTIFICATE.
|
||||
<li>HTTP.SSL.CAPATH — the path to a "certificates" directory.
|
||||
<li>HTTP.SSL.VALIDATE — force validation of the server certificate.
|
||||
</ul>
|
||||
Note that the first two are to support re-direction based authentication.
|
||||
|
||||
<h1 class="appendix><a name="allkeys">Appendix A. All RC-File Keys</a></h1>
|
||||
For completeness, this is the list of all rc-file keys.
|
||||
If this documentation is out of date with respect to the actual code,
|
||||
the code is definitive.
|
||||
<table>
|
||||
<tr><th>Key<th>curl_easy_setopt Option
|
||||
<tr valign="top"><td>HTTP.DEFLATE<td>CUROPT_DEFLATE<br>with value "deflate,gzip"
|
||||
<tr><td>HTTP.VERBOSE <td>CUROPT_VERBOSE
|
||||
<tr><td>HTTP.TIMEOUT<td>CUROPT_TIMEOUT
|
||||
<tr><td>HTTP.USERAGENT<td>CUROPT_USERAGENT
|
||||
<tr><td>HTTP.COOKIEJAR<td>CUROPT_COOKIEJAR
|
||||
<tr><td>HTTP.COOKIE_JAR<td>CUROPT_COOKIEJAR
|
||||
<tr valign="top"><td>HTTP.PROXY_SERVER<td>CURLOPT_PROXY,<br>CURLOPT_PROXYPORT,<br>CURLOPT_PROXYUSERPWD
|
||||
<tr><td>HTTP.SSL.CERTIFICATE<td>CUROPT_SSLCERT
|
||||
<tr><td>HTTP.SSL.KEY<td>CUROPT_SSLKEY
|
||||
<tr><td>HTTP.SSL.KEYPASSWORD<td>CUROPT_KEYPASSWORD
|
||||
<tr><td>HTTP.SSL.CAINFO<td>CUROPT_SSLCAINFO
|
||||
<tr><td>HTTP.SSL.CAPATH<td>CUROPT_SSLCAPATH
|
||||
<tr><td>HTTP.SSL.VERIFYPEER<td>CUROPT_SSL_VERIFYPEER
|
||||
<tr><td>HTTP.CREDENTIALS.USERPASSWORD<td>CUROPT_USERPASSWORD
|
||||
<tr><td>HTTP.NETRC<td>CURLOPT_NETRC,CURLOPT_NETRC_FILE
|
||||
</table>
|
||||
</ul>
|
||||
|
||||
<h1 class="appendix"><a name="URSDETAIL">Appendix B. URS Access in Detail</a></h1>
|
||||
It is possible to use the NASA Earthdata Login System (URS)
|
||||
with netcdf by using using the process specified in the
|
||||
<a href="#REDIR">redirection</a> based authorization section.
|
||||
In order to access URS controlled datasets, however, it is necessary to
|
||||
register as a user with NASA at the
|
||||
<i>https://uat.urs.earthdata.nasa.gov/</i>
|
||||
website.
|
||||
|
||||
<h1 class="appendix"><a name="ESGDETAIL">Appendix C. ESG Access in Detail</a></h1>
|
||||
It is possible to access Earth Systems Grid (ESG) datasets
|
||||
from ESG servers through the netCDF API using the techniques
|
||||
described in the section on <a href="#CLIENTCERTS">Client-Side Certificates</a>.
|
||||
<p>
|
||||
In order to access ESG datasets, however, it is necessary to
|
||||
register as a user with ESG and to setup your environment
|
||||
so that proper authentication is established between an netcdf
|
||||
client program and the ESG data server. Specifically, it
|
||||
is necessary to use what is called "client-side keys" to
|
||||
enable this authentication. Normally, when a client accesses
|
||||
a server in a secure fashion (using "https"), the server
|
||||
provides an authentication certificate to the client.
|
||||
With client-side keys, the client must also provide a
|
||||
certificate to the server so that the server can know with
|
||||
whom it is communicating.
|
||||
<p>
|
||||
The netcdf library uses the <i>curl</i> library and it is that
|
||||
underlying library that must be properly configured.
|
||||
|
||||
<h3>Terminology</h3>
|
||||
The key elements for client-side keys requires the constructions of
|
||||
two "stores" on the client side.
|
||||
<ul>
|
||||
<li> Keystore - a repository to hold the client side key.
|
||||
<li> Truststore - a repository to hold a chain of certificates
|
||||
that can be used to validate the certificate
|
||||
sent by the server to the client.
|
||||
</ul>
|
||||
The server actually has a similar set of stores, but the client
|
||||
need not be concerned with those.
|
||||
|
||||
<h3>Initial Steps</h3>
|
||||
|
||||
The first step is to obtain authorization from ESG.
|
||||
Note that this information may evolve over time, and
|
||||
may be out of date.
|
||||
This discussion is in terms of BADC and NCSA. You will need
|
||||
to substitute as necessary.
|
||||
<ol>
|
||||
<li> Register at http://badc.nerc.ac.uk/register
|
||||
to obtain access to badc and to obtain an openid,
|
||||
which will looks something like:
|
||||
<pre>https://ceda.ac.uk/openid/Firstname.Lastname</pre>
|
||||
<li> Ask BADC for access to whatever datasets are of interest.
|
||||
<p>
|
||||
<li> Obtain short term credentials at
|
||||
http://grid.ncsa.illinois.edu/myproxy/MyProxyLogon/
|
||||
You will need to download and run the MyProxyLogon
|
||||
program.
|
||||
This will create a keyfile in, typically, the directory ".globus".
|
||||
The keyfile will have a name similar to this: "x509up_u13615"
|
||||
The other elements in ".globus" are certificates to use in
|
||||
validating the certificate your client gets from the server.
|
||||
<p>
|
||||
<li> Obtain the program source ImportKey.java
|
||||
from this location: http://www.agentbob.info/agentbob/79-AB.html
|
||||
(read the whole page, it will help you understand the remaining steps).
|
||||
</ol>
|
||||
|
||||
<h3>Building the KeyStore</h3>
|
||||
You will have to modify the keyfile in the previous step
|
||||
and then create a keystore and install the key and a certificate.
|
||||
The commands are these:
|
||||
<pre>
|
||||
openssl pkcs8 -topk8 -nocrypt -in x509up_u13615 -inform PEM -out key.der -outform DER
|
||||
|
||||
openssl x509 -in x509up_u13615 -inform PEM -out cert.der -outform DER
|
||||
|
||||
java -classpath <path to ImportKey.class> -Dkeypassword="<password>" -Dkeystore=./<keystorefilename> key.der cert.der
|
||||
</pre>
|
||||
Note, the file names "key.der" and "cert.der" can be whatever you choose.
|
||||
It is probably best to leave the .der extension, though.
|
||||
|
||||
<h3>Building the TrustStore</h3>
|
||||
Building the truststore is a bit tricky because as provided, the
|
||||
certificates in ".globus" need some massaging. See the script below
|
||||
for the details. The primary command is this, which is executed for every
|
||||
certificate, c, in globus. It sticks the certificate into the file
|
||||
named "truststore"
|
||||
<pre>
|
||||
keytool -trustcacerts -storepass "password" -v -keystore "truststore" -importcert -file "${c}"
|
||||
</pre>
|
||||
|
||||
<h3>Running the C Client</h3>
|
||||
|
||||
Refer to the section on <a href="#CLIENTCERTS">Client-Side Certificates</a>.
|
||||
The keys specified there must be set in the rc file to support
|
||||
ESG access.
|
||||
<ul>
|
||||
<li> HTTP.COOKIEJAR=~/.dods_cookies
|
||||
<li> HTTP.NETRC=~/.netrc
|
||||
<li> HTTP.SSL.CERTIFICATE=~/esgkeystore
|
||||
<li> HTTP.SSL.KEY=~/esgkeystore
|
||||
<li> HTTP.SSL.CAPATH=~/.globus
|
||||
<li> HTTP.SSL.VALIDATE=1
|
||||
</ul>
|
||||
Of course, the file paths above are suggestions only;
|
||||
you can modify as needed.
|
||||
The HTTP.SSL.CERTIFICATE and HTTP.SSL.KEY
|
||||
entries should have same value, which is the file path for the
|
||||
certificate produced by MyProxyLogon. The HTTP.SSL.CAPATH entry
|
||||
should be the path to the "certificates" directory produced by
|
||||
MyProxyLogon.
|
||||
<p>
|
||||
As noted, also uses re-direction based authentication.
|
||||
So, when it receives an initial connection from a client, it
|
||||
redirects to a separate authentication server. When that
|
||||
server has authenticated the client, it redirects back to
|
||||
the original url to complete the request.
|
||||
|
||||
<h3>Script for creating Stores</h3>
|
||||
The following script shows in detail how to actually construct the key
|
||||
and trust stores. It is specific to the format of the globus file
|
||||
as it was when ESG support was first added. It may have changed
|
||||
since then, in which case, you will need to seek some help
|
||||
in fixing this script. It would help if you communicated
|
||||
what you changed to the author so this document can be updated.
|
||||
<pre>
|
||||
#!/bin/sh -x
|
||||
KEYSTORE="esgkeystore"
|
||||
TRUSTSTORE="esgtruststore"
|
||||
GLOBUS="globus"
|
||||
TRUSTROOT="certificates"
|
||||
CERT="x509up_u13615"
|
||||
TRUSTROOTPATH="$GLOBUS/$TRUSTROOT"
|
||||
CERTFILE="$GLOBUS/$CERT"
|
||||
PWD="password"
|
||||
|
||||
D="-Dglobus=$GLOBUS"
|
||||
CCP="bcprov-jdk16-145.jar"
|
||||
CP="./build:${CCP}"
|
||||
JAR="myproxy.jar"
|
||||
|
||||
# Initialize needed directories
|
||||
rm -fr build
|
||||
mkdir build
|
||||
rm -fr $GLOBUS
|
||||
mkdir $GLOBUS
|
||||
rm -f $KEYSTORE
|
||||
rm -f $TRUSTSTORE
|
||||
|
||||
# Compile MyProxyCmd and ImportKey
|
||||
javac -d ./build -classpath "$CCP" *.java
|
||||
javac -d ./build ImportKey.java
|
||||
|
||||
# Execute MyProxyCmd
|
||||
java -cp "$CP myproxy.MyProxyCmd
|
||||
|
||||
# Build the keystore
|
||||
openssl pkcs8 -topk8 -nocrypt -in $CERTFILE -inform PEM -out key.der -outform DER
|
||||
openssl x509 -in $CERTFILE -inform PEM -out cert.der -outform DER
|
||||
java -Dkeypassword=$PWD -Dkeystore=./${KEYSTORE} -cp ./build ImportKey key.der cert.der
|
||||
|
||||
# Clean up the certificates in the globus directory
|
||||
for c in ${TRUSTROOTPATH}/*.0 ; do
|
||||
alias=`basename $c .0`
|
||||
sed -e '0,/---/d' <$c >/tmp/${alias}
|
||||
echo "-----BEGIN CERTIFICATE-----" >$c
|
||||
cat /tmp/${alias} >>$c
|
||||
done
|
||||
|
||||
# Build the truststore
|
||||
for c in ${TRUSTROOTPATH}/*.0 ; do
|
||||
alias=`basename $c .0`
|
||||
echo "adding: $TRUSTROOTPATH/${c}"
|
||||
echo "alias: $alias"
|
||||
yes | keytool -trustcacerts -storepass "$PWD" -v -keystore ./$TRUSTSTORE -alias $alias -importcert -file "${c}"
|
||||
done
|
||||
exit
|
||||
</pre>
|
||||
|
||||
</body>
|
||||
</html>
|
@@ -23,7 +23,7 @@ The libcurl authorization mechanisms can be accessed in two ways

1. Inserting the username and password into the url, or
2. Accessing information from a so-called _rc_ file named either
`.daprc` or `.dodsrc`
`.ncrc` or `.dodsrc`. The latter is deprecated, but will be supported indefinitely.

## URL-Based Authentication {#auth_url}
@@ -142,6 +142,8 @@ The second column is the affected curl_easy_setopt option(s), if any
<tr><td>HTTP.CREDENTIALS.USERNAME</td><td>CURLOPT_USERNAME</td>
<tr><td>HTTP.CREDENTIALS.PASSWORD</td><td>CURLOPT_PASSWORD</td>
<tr><td>HTTP.NETRC</td><td>N.A.</td><td>Specify path of the .netrc file</td>
<tr><td>AWS.PROFILE</td><td>N.A.</td><td>Specify name of a profile from the .aws/credentials file</td>
<tr><td>AWS.REGION</td><td>N.A.</td><td>Specify name of a default region</td>
</table>

### Password Authentication
@@ -1,11 +1,14 @@
NetCDF-4 Filter Support
============================

<!-- double header is needed to workaround doxygen bug -->

# NetCDF-4 Filter Support {#filters}
NetCDF-4 Filter Support {#filters}
==================================

\tableofcontents
[TOC]

# Introduction to Filters {#filters_introduction}

The netCDF library supports a general filter mechanism to apply various
kinds of filters to datasets before reading or writing.
@@ -359,13 +362,14 @@ So it has three parameters:
2. "clevel" -- the compression level, 5 in this case.
3. "shuffle" -- is the input shuffled before compression, yes (1) in this case.

NCZarr has three constraints that must be met.
NCZarr has four constraints that must be met.

1. It must store its filter information in its metadata in the above JSON dictionary format.
2. It is required to re-use the HDF5 filter implementations.
This is to avoid having to rewrite the filter implementations
This means that some mechanism is needed to translate between the HDF5 id+parameter model and the Zarr JSON dictionary model.
3. It must be possible to modify the set of visible parameters in response to environment information such as the type of the associated variable; this is required to mimic the corresponding HDF5 capability.
4. It must be possible to use filters even if HDF5 support is disabled.

Note that the term "visible parameters" is used here to refer to the parameters provided by "nc_def_var_filter" or those stored in the dataset's metadata as provided by the JSON codec. The term "working parameters" refers to the parameters given to the compressor itself and derived from the visible parameters.
@@ -401,7 +405,7 @@ If this is important, then the filter implementation is responsible for marking

### Step 2: Convert visible parameters to working parameters

Given environmental information such as the associated variables base type, the visible parameters
Given environmental information such as the associated variable's base type, the visible parameters
are converted to a potentially larger set of working parameters; additionally provide the opportunity
to modify the visible parameters.
@@ -422,6 +426,7 @@ the netcdf-c API. Rather, one must know the HDF5 id and parameters of
the filter of interest and use the functions ''nc_def_var_filter'' and ''nc_inq_var_filter''.
Internally, the NCZarr code will use information about known Codecs to convert the HDF5 filter reference to the corresponding Codec.
This restriction also holds for the specification of filters in ''ncgen'' and ''nccopy''.
This limitation may be lifted in the future.

## Special Codecs Attribute
@@ -470,6 +475,16 @@ This results in this table for associated codec and hdf5 libraries.
<tr><td>Defined<td>Defined<td>NCZarr usable
</table>

## Filter Defaults Library

As a special case, a shared library may be created to hold
defaults for a common set of filters.
Basically, there is a specially defined function that returns
a vector of codec APIs. These defaults are used only if
no other library provided codec information for a filter.
Currently, the defaults library provides codec defaults
for Shuffle, Fletcher32, Deflate (zlib), and SZIP.

## Using the Codec API

Given a set of filters for which the HDF5 API and the Codec API
@@ -711,30 +726,27 @@ Several functions are exported from the netcdf-c library for use by client programs.
They are defined in the header file __netcdf_aux.h__.
The h5 tag indicates that they assume that the result of the parse is a set of unsigned integers — the format used by HDF5.

1. ````int ncaux_h5filterspec_parse(const char* txt, unsigned int* idp, size_t* nparamsp, unsigned int** paramsp);````
    * txt contains the text of a sequence of comma separated constants
    * idp will contain the first constant — the filter id
    * nparamsp will contain the number of params
    * paramsp will contain a vector of params — the caller must free
1. ''int ncaux_h5filterspec_parse(const char* txt, unsigned int* idp, size_t* nparamsp, unsigned int** paramsp);''
    * txt contains the text of a sequence of comma separated constants
    * idp will contain the first constant — the filter id
    * nparamsp will contain the number of params
    * paramsp will contain a vector of params — the caller must free
This function can parse single filter spec strings as defined in the section on <a href="#filters_syntax">Filter Specification Syntax</a>.

2. ````int ncaux_h5filterspec_parselist(const char* txt, int* formatp, size_t* nspecsp, struct NC_H5_Filterspec*** vectorp);````
    * txt contains the text of a sequence '|' separated filter specs.
    * formatp currently always returns 0.
    * nspecsp will return the number of filter specifications.
    * vectorp will return a pointer to a vector of pointers to filter specification instances — the caller must free.
2. ''int ncaux_h5filterspec_parselist(const char* txt, int* formatp, size_t* nspecsp, struct NC_H5_Filterspec*** vectorp);''
    * txt contains the text of a sequence '|' separated filter specs.
    * formatp currently always returns 0.
    * nspecsp will return the number of filter specifications.
    * vectorp will return a pointer to a vector of pointers to filter specification instances — the caller must free.
This function parses a sequence of filter specifications each separated by a '|' character.
The text between '|' separators must be parsable by __ncaux_h5filterspec_parse__.

3. ````void ncaux_h5filterspec_free(struct NC_H5_Filterspec* f);````
    * f is a pointer to an instance of ````struct NC_H5_Filterspec````
3. ''void ncaux_h5filterspec_free(struct NC_H5_Filterspec* f);''
    * f is a pointer to an instance of ````struct NC_H5_Filterspec````
Typically this was returned as an element of the vector returned
by __ncaux_h5filterspec_parselist__.
This reclaims the parameters of the filter spec object as well as the object itself.

4. ````int ncaux_h5filterspec_fix8(unsigned char* mem8, int decode);````
    * mem8 is a pointer to the 8-byte value either to fix.
    * decode is 1 if the function should apply the 8-byte decoding algorithm
4. ''int ncaux_h5filterspec_fix8(unsigned char* mem8, int decode);''
    * mem8 is a pointer to the 8-byte value either to fix.
    * decode is 1 if the function should apply the 8-byte decoding algorithm
else apply the encoding algorithm.
This function implements the 8-byte conversion algorithms for HDF5.
Before calling *nc_def_var_filter* (unless *NC_parsefilterspec* was used), the client must call this function with the decode argument set to 0.
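For illustration, a minimal sketch of calling ncaux_h5filterspec_parse as documented above; the filter id and parameter values in the spec string are arbitrary examples, not a recommendation of any particular filter.

```c
#include <stdio.h>
#include <stdlib.h>
#include <netcdf.h>
#include <netcdf_aux.h>

int main(void) {
    /* A single filter spec: an HDF5 filter id followed by one unsigned parameter. */
    const char *spec = "307,9";           /* arbitrary example values */
    unsigned int id = 0;
    size_t nparams = 0;
    unsigned int *params = NULL;

    if (ncaux_h5filterspec_parse(spec, &id, &nparams, &params) != NC_NOERR) {
        fprintf(stderr, "could not parse filter spec: %s\n", spec);
        return 1;
    }
    printf("filter id=%u, nparams=%zu, first param=%u\n",
           id, nparams, nparams > 0 ? params[0] : 0u);
    free(params);                          /* per the docs, the caller must free */
    return 0;
}
```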
@@ -950,3 +962,4 @@ __Author__: Dennis Heimbigner<br>
__Email__: dmh at ucar dot edu<br>
__Initial Version__: 1/10/2018<br>
__Last Revised__: 7/17/2021
docs/nczarr.md (144 lines changed)
@@ -25,8 +25,12 @@ There are some important "caveats" of which to be aware when using this software

NCZarr uses a data model <a href="#ref_nczarr">[4]</a> that, by design, extends the Zarr Version 2 Specification <a href="#ref_zarrv2">[6]</a> to add support for the NetCDF-4 data model.

__Note Carefully__: a legal _Zarr_ dataset is also a legal _NCZarr_ dataset with a specific assumption. This assumption is that within Zarr meta-data objects, like __.zarray__, unrecognized dictionary keys are ignored.
__Note Carefully__: a legal _NCZarr_ dataset is also a legal _Zarr_ dataset under a specific assumption. This assumption is that within Zarr meta-data objects, like ''.zarray'', unrecognized dictionary keys are ignored.
If this assumption is true of an implementation, then the _NCZarr_ dataset is a legal _Zarr_ dataset and should be readable by that _Zarr_ implementation.
The inverse is true also. A legal _Zarr_ dataset is also a legal _NCZarr_
dataset, where "legal" means it conforms to the Zarr version 2 specification.
In addition, certain non-Zarr features are allowed and used.
Specifically the XArray ''\_ARRAY\_DIMENSIONS'' attribute is one such.

There are two other, secondary assumption:
@@ -35,7 +39,8 @@ There are two other, secondary assumption:

Briefly, the data model supported by NCZarr is netcdf-4 minus the user-defined types and the String type.
As with netcdf-4 chunking is supported.
Filters and compression are supported, but the companion document on filters
Filters and compression are supported, but
[the companion document on filters](./md_filters.html "filters")
should be consulted for the details.

Specifically, the model supports the following.
@@ -88,12 +93,15 @@ There are some details that are important.
to "https" plus setting "mode=nczarr,s3" (see below).
Specifying "file" is mostly used for testing, but is used to support
directory tree or zipfile format storage.
- Host: Amazon S3 defines two forms: _Virtual_ and _Path_.
- Host: Amazon S3 defines three forms: _Virtual_, _Path_, and _S3_
  + _Virtual_: the host includes the bucket name as in
    __bucket.s3.<region>.amazonaws.com__
  + _Path_: the host does not include the bucket name, but
    rather the bucket name is the first segment of the path.
    For example __s3.<region>.amazonaws.com/bucket__
  + _S3_: the protocol is "s3:" and if the host is a single name,
    then it is interpreted as the bucket. The region is determined
    using the algorithm in Appendix E.
  + _Other_: It is possible to use other non-Amazon cloud storage, but
    that is cloud library dependent.
- Query: currently not used.
@@ -248,7 +256,7 @@ The primary zmap implementation is _s3_ (i.e. _mode=nczarr,s3_) and indicates th
Another storage format uses a file system tree of directories and files (_mode=nczarr,file_).
A third storage format uses a zip file (_mode=nczarr,zip_).
The latter two are used mostly for debugging and testing.
However, the _file_ and _zip_ formats are important because they is intended to match corresponding storage formats used by the Python Zarr implementation.
However, the _file_ and _zip_ formats are important because they are intended to match corresponding storage formats used by the Python Zarr implementation.
Hence it should serve to provide interoperability between NCZarr and the Python Zarr, although this interoperability has not been tested.

Examples of the typical URL form for _file_ and _zip_ are as follows.
@@ -266,9 +274,10 @@ As with other URLS (e.g. DAP), these kind of URLS can be passed as the path argu

# NCZarr versus Pure Zarr. {#nczarr_purezarr}

The NCZARR format extends the pure Zarr format by adding extra keys such as _\_NCZARR\_ARRAY_ inside the _.zarray_ object.
The NCZARR format extends the pure Zarr format by adding extra keys such as ''\_NCZARR\_ARRAY'' inside the ''.zarray'' object.
It is possible to suppress the use of these extensions so that the netcdf library can read and write a pure zarr formatted file.
This is controlled by using _mode=nczarr,zarr_ combination.
This is controlled by using ''mode=zarr'', which is an alias for the
''mode=nczarr,zarr'' combination.
The primary effects of using pure zarr are described in the [Translation Section](@ref nczarr_translation).

There are some constraints on the reading of Zarr datasets using the NCZarr implementation.
@@ -276,10 +285,12 @@ There are some constraints on the reading of Zarr datasets using the NCZarr impl
1. Zarr allows some primitive types not recognized by NCZarr.
Over time, the set of unrecognized types is expected to diminish.
Examples of currently unsupported types are as follows:
* "c" -- complex floating point
* "m" -- timedelta
* "M" -- datetime
* "c" -- complex floating point
* "m" -- timedelta
* "M" -- datetime
2. The Zarr dataset may reference filters and compressors unrecognized by NCZarr.
3. The Zarr dataset may store data in column-major order instead of row-major order. The effect of encountering such a dataset is to output the data in the wrong order.

Again, this list should diminish over time.

# Notes on Debugging NCZarr Access {#nczarr_debug}
@@ -299,7 +310,7 @@ Note that this is different from zlib.

The Amazon AWS S3 storage driver currently uses the Amazon AWS S3 Software Development Kit for C++ (aws-s3-sdk-cpp).
In order to use it, the client must provide some configuration information.
Specifically, the `~/.aws/config` file should contain something like this.
Specifically, the ''~/.aws/config'' file should contain something like this.

```
[default]
@@ -307,6 +318,8 @@ output = json
aws_access_key_id=XXXX...
aws_secret_access_key=YYYY...
```
See Appendix E for additional information.

## Addressing Style
@@ -446,6 +459,15 @@ Here are a couple of examples using the _ncgen_ and _ncdump_ utilities.
```
ncgen -4 -lb -o "s3://s3.uswest-1.amazonaws.com/datasetbucket#mode=zarr" dataset.cdl
```
5. Create an nczarr file using the s3 protocol with a specific profile
```
ncgen -4 -lb -o "s3://datasetbucket/rootkey#mode=nczarr,awsprofile=unidata" dataset.cdl
```
Note that the URL is internally translated to this
````
https://s3.<region>.amazonaws.com/datasetbucket/rootkey#mode=nczarr,awsprofile=unidata" dataset.cdl
````
The region is from the algorithm described in Appendix E1.

# References {#nczarr_bib}
@@ -479,7 +501,7 @@ Currently the following build cases are known to work.

Note: S3 support includes both compiling the S3 support code as well as running the S3 tests.

# Automake
## Automake

There are several options relevant to NCZarr support and to Amazon S3 support.
These are as follows.
@@ -498,7 +520,7 @@ The above assumes that these libraries were installed in '/usr/local/lib', so th

Note also that if S3 support is enabled, then you need to have a C++ compiler installed because part of the S3 support code is written in C++.

# CMake {#nczarr_cmake}
## CMake {#nczarr_cmake}

The necessary CMake flags are as follows (with defaults)
@@ -530,7 +552,7 @@ This can be useful if blanks in path names cause problems in your build environm

The relevant tests for S3 support are in the _nczarr_test_ directory.
Currently, by default, testing of S3 with NCZarr is supported only for Unidata members of the NetCDF Development Group.
This is because it uses a specific bucket on a specific internal S3 appliance that is inaccessible to the general user.
This is because it uses a Unidata-specific bucket that is inaccessible to the general user.

However, an untested mechanism exists by which others may be able to run the S3 specific tests.
If someone else wants to attempt these tests, then they need to define the following environment variables:
@@ -558,15 +580,37 @@ Building this package from scratch has proven to be a formidable task.
This appears to be due to dependencies on very specific versions of,
for example, openssl.

It is recommended that you use conda to install this package on linux.
See [11] for the relevant conda package information.
However, the following context does work. Of course your mileage may vary.
* OS: ubuntu 21
* aws-sdk-cpp version 1.9.96 or later?
* Required installed libraries: openssl, libcurl, cmake, ninja (ninja-build in apt)

The recipe used:
````
git clone --recurse-submodules https://www.github.com/aws/aws-sdk-cpp
pushd aws-sdk-cpp
mkdir build
cd build
PREFIX=/usr/local
FLAGS="-DCMAKE_INSTALL_PREFIX=${PREFIX}
-DCMAKE_INSTALL_LIBDIR=lib \
-DCMAKE_MODULE_PATH=${PREFIX}/lib/cmake \
-DCMAKE_POLICY_DEFAULT_CMP0075=NEW \
-DBUILD_ONLY=s3 \
-DENABLE_UNITY_BUILD=ON \
-DENABLE_TESTING=OFF \
-DCMAKE_BUILD_TYPE=$CFG \
-DSIMPLE_INSTALL=ON"
cmake -GNinja $FLAGS ..
ninja all
ninja install
cd ..
popd
````

For Windows we do not yet have a solution. If you successfully install
on Windows, please let us know how you did it.

This library depends on libcurl and openssl, so you may need to install those
before installing the sdk library.

# Appendix C. Amazon S3 Imposed Limits {#nczarr_s3limits}

The Amazon S3 cloud storage imposes some significant limits that are inherited by NCZarr (and Zarr also, for that matter).
@@ -594,28 +638,72 @@ https://s3.us-east-1.amazonaws.com/noaa-goes16/ABI-L1b-RadC/2017/059/03/OR_ABI-L
Note that for S3 access, it is expected that the URL is in what is called "path" format where the bucket, _noaa-goes16_ in this case, is part of the URL path instead of the host.

The _#mode=byterange_ mechanism generalizes to work with most servers that support byte-range access.
The _#mode=bytes_ mechanism generalizes to work with most servers that support byte-range access.

Specifically, Thredds servers support such access using the HttpServer access method as can be seen from this URL taken from the above test program.
````
https://thredds-test.unidata.ucar.edu/thredds/fileServer/irma/metar/files/METAR_20170910_0000.nc#bytes
````

## Byte-Range Authorization
# Appendix E. AWS Selection Algorithms.

If using byte-range access, it may be necessary to tell the netcdf-c
library about the so-called secretid and accessid values.
These are usually stored in the file ````~/.aws/config````
and/or ````~/.aws/credentials````.
In the latter file, this
might look like this.
If byterange support is enabled, the netcdf-c library will parse the files
````
[default]
${HOME}/.aws/config
and
${HOME}/.aws/credentials
````
to extract profile names plus a list
of key=value pairs. This example is typical.
````
[default]
aws_access_key_id=XXXXXXXXXXXXXXXXXXXX
aws_secret_access_key=YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
aws_region=ZZZZZZZZZ
````
The keys in the profile will be used to set various parameters in the library.

# Appendix E. NCZarr Version 1 Meta-Data Representation
## Profile Selection

The algorithm for choosing the active profile to use is as follows:

1. If the "aws.profile" fragment flag is defined in the URL, then it is used. For example, see this URL.
````
https://...#mode=nczarr,s3&aws.profile=xxx
````
2. If the "AWS.PROFILE" entry in the .rc file (i.e. .netrc or .dodsrc) is set, then it is used.
3. Otherwise the profile "default" is used.

The profile named "none" is a special profile that the netcdf-c library automatically defines.
It should not be defined anywhere else. It signals to the library that no credentials are to be used.
It is equivalent to the "--no-sign-request" option in the AWS CLI.
Also, it must be explicitly specified by name. Otherwise "default" will be used.

## Region Selection

If the specified URL is of the form
````
s3://<bucket>/key
````
Then this is rebuilt to this form:
````
s3://s3.<region>.amazonaws.com/key
````
However this requires figuring out the region to use.
The algorithm for picking a region is as follows.

1. If the "aws.region" fragment flag is defined in the URL, then it is used.
2. The active profile is searched for the "aws_region" key.
3. If the "AWS.REGION" entry in the .rc file (i.e. .netrc or .dodsrc) is set, then it is used.
4. Otherwise use the "us-east-1" region.
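The precedence above can be read as a simple cascade. The following is a schematic sketch only, with hypothetical helper functions standing in for the URL-fragment, profile, and .rc-file lookups; it is not the library's actual implementation.

```c
#include <stddef.h>

/* Hypothetical lookup helpers; each returns NULL when its source
   does not define a region. */
const char *url_fragment_value(const char *key);   /* e.g. "aws.region" */
const char *profile_value(const char *key);        /* e.g. "aws_region" */
const char *rcfile_value(const char *key);         /* e.g. "AWS.REGION" */

/* Apply the documented precedence: URL fragment, then active profile,
   then .rc file, then the "us-east-1" default. */
const char *select_region(void)
{
    const char *region;
    if ((region = url_fragment_value("aws.region")) != NULL) return region;
    if ((region = profile_value("aws_region")) != NULL) return region;
    if ((region = rcfile_value("AWS.REGION")) != NULL) return region;
    return "us-east-1";
}
```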

## Authorization Selection

Picking an access-key/secret-key pair is always determined
by the current active profile. To choose to not use keys
requires that the active profile must be "none".

# Appendix F. NCZarr Version 1 Meta-Data Representation

In NCZarr Version 1, the NCZarr specific metadata was represented using new objects rather than as keys in existing Zarr objects.
Due to conflicts with the Zarr specification, that format is deprecated in favor of the one described above.
@@ -20,7 +20,7 @@ nc4internal.h nctime.h nc3internal.h onstack.h ncrc.h ncauth.h \
ncoffsets.h nctestserver.h nc4dispatch.h nc3dispatch.h ncexternl.h \
ncpathmgr.h ncindex.h hdf4dispatch.h hdf5internal.h nc_provenance.h \
hdf5dispatch.h ncmodel.h isnan.h nccrc.h ncexhash.h ncxcache.h \
ncfilter.h ncjson.h
ncfilter.h ncjson.h ezxml.h

if USE_DAP
noinst_HEADERS += ncdap.h
@@ -57,7 +57,61 @@ struct ezxml {
/* structure. For efficiency, modifies the data by adding null terminators*/
/* and decoding ampersand sequences. If you don't want this, copy the data and*/
/* pass in the copy. Returns NULL on failure.*/
ezxml_t ezxml_parse_str(char *s, size_t len);
ezxml_t nc_ezxml_parse_str(char *s, size_t len);

/* returns the first child tag (one level deeper) with the given name or NULL*/
/* if not found*/
ezxml_t nc_ezxml_child(ezxml_t xml, const char *name);

/* returns the next tag of the same name in the same section and depth or NULL*/
/* if not found*/
#define nc_ezxml_next(xml) ((xml) ? xml->next : NULL)

/* Returns the Nth tag with the same name in the same section at the same depth*/
/* or NULL if not found. An index of 0 returns the tag given.*/
ezxml_t nc_ezxml_idx(ezxml_t xml, int idx);

/* returns the name of the given tag*/
#define nc_ezxml_name(xml) ((xml) ? xml->name : NULL)

/* returns the given tag's character content or empty string if none*/
#define nc_ezxml_txt(xml) ((xml) ? xml->txt : "")

/* returns the value of the requested tag attribute, or NULL if not found*/
const char *nc_ezxml_attr(ezxml_t xml, const char *attr);

/* Traverses the ezxml structure to retrieve a specific subtag. Takes a*/
/* variable length list of tag names and indexes. The argument list must be*/
/* terminated by either an index of -1 or an empty string tag name. Example: */
/* title = ezxml_get(library, "shelf", 0, "book", 2, "title", -1);*/
/* This retrieves the title of the 3rd book on the 1st shelf of library.*/
/* Returns NULL if not found.*/
ezxml_t nc_ezxml_get(ezxml_t xml, ...);

/* Converts an ezxml structure back to xml. Returns a string of xml data that*/
/* must be freed.*/
char *nc_ezxml_toxml(ezxml_t xml);

/* returns a NULL terminated array of processing instructions for the given*/
/* target*/
const char **nc_ezxml_pi(ezxml_t xml, const char *target);

/* frees the memory allocated for an ezxml structure*/
void nc_ezxml_free(ezxml_t xml);

/* returns parser error message or empty string if none*/
const char *nc_ezxml_error(ezxml_t xml);

const char** nc_ezxml_all_attr(ezxml_t xml, int* countp);


#if 0

/* returns a new empty ezxml structure with the given root tag name*/
ezxml_t nc_ezxml_new(const char *name);

/* wrapper for ezxml_new() that strdup()s name*/
#define nc_ezxml_new_d(name) ezxml_set_flag(ezxml_new(strdup(name)), EZXML_NAMEM)

/* A wrapper for ezxml_parse_str() that accepts a file descriptor. First*/
/* attempts to mem map the file. Failing that, reads the file into memory.*/
@@ -72,55 +126,6 @@ ezxml_t ezxml_parse_file(const char *file);
/* or ezxml_parse_fd()*/
ezxml_t ezxml_parse_fp(FILE *fp);

/* returns the first child tag (one level deeper) with the given name or NULL*/
/* if not found*/
ezxml_t ezxml_child(ezxml_t xml, const char *name);

/* returns the next tag of the same name in the same section and depth or NULL*/
/* if not found*/
#define ezxml_next(xml) ((xml) ? xml->next : NULL)

/* Returns the Nth tag with the same name in the same section at the same depth*/
/* or NULL if not found. An index of 0 returns the tag given.*/
ezxml_t ezxml_idx(ezxml_t xml, int idx);

/* returns the name of the given tag*/
#define ezxml_name(xml) ((xml) ? xml->name : NULL)

/* returns the given tag's character content or empty string if none*/
#define ezxml_txt(xml) ((xml) ? xml->txt : "")

/* returns the value of the requested tag attribute, or NULL if not found*/
const char *ezxml_attr(ezxml_t xml, const char *attr);

/* Traverses the ezxml structure to retrieve a specific subtag. Takes a*/
/* variable length list of tag names and indexes. The argument list must be*/
/* terminated by either an index of -1 or an empty string tag name. Example: */
/* title = ezxml_get(library, "shelf", 0, "book", 2, "title", -1);*/
/* This retrieves the title of the 3rd book on the 1st shelf of library.*/
/* Returns NULL if not found.*/
ezxml_t ezxml_get(ezxml_t xml, ...);

/* Converts an ezxml structure back to xml. Returns a string of xml data that*/
/* must be freed.*/
char *ezxml_toxml(ezxml_t xml);

/* returns a NULL terminated array of processing instructions for the given*/
/* target*/
const char **ezxml_pi(ezxml_t xml, const char *target);

/* frees the memory allocated for an ezxml structure*/
void ezxml_free(ezxml_t xml);

/* returns parser error message or empty string if none*/
const char *ezxml_error(ezxml_t xml);

/* returns a new empty ezxml structure with the given root tag name*/
ezxml_t ezxml_new(const char *name);

/* wrapper for ezxml_new() that strdup()s name*/
#define ezxml_new_d(name) ezxml_set_flag(ezxml_new(strdup(name)), EZXML_NAMEM)

/* Adds a child tag. off is the offset of the child tag relative to the start*/
/* of the parent tag's character content. Returns the child tag.*/
ezxml_t ezxml_add_child(ezxml_t xml, const char *name, size_t off);
@ -160,6 +165,8 @@ ezxml_t ezxml_insert(ezxml_t xml, ezxml_t dest, size_t off);
|
||||
/* removes a tag along with all its subtags*/
|
||||
#define ezxml_remove(xml) ezxml_free(ezxml_cut(xml))
|
||||
|
||||
#endif /*0*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
@ -64,7 +64,7 @@ typedef struct NC_HDF5_FILE_INFO {
struct HTTP {
NCURI* uri; /* Parse of the incoming path, if url */
int iosp; /* We are using the S3 rawvirtual file driver */
struct NCauth* auth;
struct NCauth* auth;
} http;
#endif
} NC_HDF5_FILE_INFO_T;
@ -49,15 +49,22 @@ typedef struct NCauth {
char *user; /*CURLOPT_USERNAME*/
char *pwd; /*CURLOPT_PASSWORD*/
} creds;
struct s3credentials {
char *accessid;
char *secretkey;
} s3creds;
char* s3profile;
} NCauth;

#if defined(__cplusplus)
extern "C" {
#endif

extern int NC_authsetup(NCauth**, NCURI*);
extern void NC_authfree(NCauth*);
extern char* NC_combinehostport(NCURI*);
extern int NC_parsecredentials(const char* userpwd, char** userp, char** pwdp);

extern int NC_authgets3creds(NCauth* auth, const char* profile, const char** accessidp, const char** secretkeyp);

#if defined(__cplusplus)
}
#endif

#endif /*NCAUTH_H*/
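The revised NCauth API above replaces embedded S3 keys with a profile name; below is a minimal, hypothetical sketch of how a caller might drive it (the URL, the ncuriparse()/ncurifree() helpers from ncuri.h, and the error handling are illustrative assumptions, not part of this change).

#include "netcdf.h"
#include "ncuri.h"
#include "ncauth.h"

/* Sketch only: the s3:// URL is hypothetical */
static int
example_s3_auth(void)
{
    int stat = NC_NOERR;
    NCURI* uri = NULL;
    NCauth* auth = NULL;
    const char* accessid = NULL;
    const char* secretkey = NULL;

    if(ncuriparse("s3://example-bucket/key#mode=nczarr,s3", &uri)) return NC_EURL;
    if((stat = NC_authsetup(&auth, uri))) goto done;
    /* auth->s3profile now names an .aws profile instead of holding raw keys */
    stat = NC_authgets3creds(auth, auth->s3profile, &accessid, &secretkey);
done:
    NC_authfree(auth);
    ncurifree(uri);
    return stat;
}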
@ -48,7 +48,7 @@ EXTERNL int ncbytesremove(NCbytes*,unsigned long);
EXTERNL int ncbytescat(NCbytes*,const char*);

/* Set the contents of the buffer; mark the buffer as non-extendible */
EXTERNL int ncbytessetcontents(NCbytes*, char*, unsigned long);
EXTERNL int ncbytessetcontents(NCbytes*, void*, unsigned long);

/* Following are always "in-lined"*/
#define ncbyteslength(bb) ((bb)!=NULL?(bb)->length:0)
@ -9,7 +9,7 @@
#define NCHTTP_H

typedef enum HTTPMETHOD {
HTTPNONE=0, HTTPGET=1, HTTPPUT=2, HTTPPOST=3, HTTPHEAD=4
HTTPNONE=0, HTTPGET=1, HTTPPUT=2, HTTPPOST=3, HTTPHEAD=4, HTTPDELETE=5
} HTTPMETHOD;

struct CURL; /* Forward */
@ -17,16 +17,33 @@ struct CURL; /* Forward */
typedef struct NC_HTTP_STATE {
    struct CURL* curl;
    long httpcode;
    const char** headset; /* which headers to capture */
    NClist* headers;
    NCbytes* buf;
    struct Response {
        NClist* headset; /* which headers to capture */
        NClist* headers; /* Set of captured headers */
        NCbytes* buf; /* response content; call owns; do not free */
    } response;
    struct Request {
        HTTPMETHOD method;
        size_t payloadsize;
        void* payload; /* caller owns; do not free */
        size_t payloadpos;
        NClist* headers;
    } request;
    char errbuf[1024]; /* assert(CURL_ERROR_SIZE <= 1024) */
} NC_HTTP_STATE;

extern int nc_http_open(const char* objecturl, NC_HTTP_STATE** state, long long* lenp);
extern int nc_http_init(NC_HTTP_STATE** state);
extern int nc_http_init_verbose(NC_HTTP_STATE** state, int verbose);
extern int nc_http_size(NC_HTTP_STATE* state, const char* url, long long* sizep);
extern int nc_http_read(NC_HTTP_STATE* state, const char* url, size64_t start, size64_t count, NCbytes* buf);
extern int nc_http_write(NC_HTTP_STATE* state, const char* url, NCbytes* payload);
extern int nc_http_close(NC_HTTP_STATE* state);
extern int nc_http_headers(NC_HTTP_STATE* state, const NClist** headersp); /* only if headerson */
extern int nc_http_reset(NC_HTTP_STATE* state);
extern int nc_http_set_method(NC_HTTP_STATE* state, HTTPMETHOD method);
extern int nc_http_set_response(NC_HTTP_STATE* state, NCbytes* buf);
extern int nc_http_set_payload(NC_HTTP_STATE* state, size_t len, void* payload);
extern int nc_http_response_headset(NC_HTTP_STATE* state, const NClist* headers); /* Set of headers to capture */
extern int nc_http_response_headers(NC_HTTP_STATE* state, NClist** headersp); /* set of captured headers */
extern int nc_http_request_setheaders(NC_HTTP_STATE* state, const NClist* headers); /* set of extra request headers */

#endif /*NCHTTP_H*/
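A hypothetical sketch of the reworked nc_http_* request/response flow declared above; the object URL is illustrative and error handling is abbreviated.

#include "netcdf.h"
#include "ncbytes.h"
#include "nclist.h"
#include "nchttp.h"

/* Sketch only: probe the size, read the whole object, then PUT it back */
static int
example_http_roundtrip(const char* url) /* url is a hypothetical object location */
{
    int stat = NC_NOERR;
    NC_HTTP_STATE* state = NULL;
    NCbytes* content = ncbytesnew();
    long long len = 0;

    if((stat = nc_http_init(&state))) goto done;
    if((stat = nc_http_size(state, url, &len))) goto done;                       /* HEAD + captured content-length */
    if((stat = nc_http_read(state, url, 0, (size64_t)len, content))) goto done;  /* ranged GET into the response buffer */
    if((stat = nc_http_write(state, url, content))) goto done;                   /* PUT the payload back */
done:
    nc_http_close(state);
    ncbytesfree(content);
    return stat;
}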
@ -29,7 +29,7 @@ EXTERNL int nclistsetlength(NClist*,size_t);
/* Set the ith element; will overwrite previous contents; expand if needed */
EXTERNL int nclistset(NClist*,size_t,void*);
/* Get value at position i */
EXTERNL void* nclistget(NClist*,size_t);/* Return the ith element of l */
EXTERNL void* nclistget(const NClist*,size_t);/* Return the ith element of l */
/* Insert at position i; will push up elements i..|seq|. */
EXTERNL int nclistinsert(NClist*,size_t,void*);
/* Remove element at position i; will move higher elements down */
@ -53,7 +53,7 @@ EXTERNL int nclistelemremove(NClist* l, void* elem);
EXTERNL int nclistunique(NClist*);

/* Create a clone of a list; if deep, then assume it is a list of strings */
EXTERNL NClist* nclistclone(NClist*, int deep);
EXTERNL NClist* nclistclone(const NClist*, int deep);

EXTERNL void* nclistextract(NClist*);
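The const-qualified nclistclone() above backs the new deep-clone behavior used for header lists; a small illustrative sketch (the header names are arbitrary examples):

#include <stdlib.h>
#include <string.h>
#include "nclist.h"

/* Sketch only: a deep clone duplicates the strings, so each list is freed independently */
static void
example_listclone(void)
{
    NClist* headers = nclistnew();
    NClist* copy = NULL;
    nclistpush(headers, strdup("content-length"));
    nclistpush(headers, strdup("accept-ranges"));
    copy = nclistclone(headers, 1); /* deep: element strings are copied */
    nclistfreeall(headers);         /* frees the list and its strings */
    nclistfreeall(copy);            /* the clone owns its own copies */
}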
@ -167,6 +167,7 @@ EXTERNL int NCclosedir(DIR* ent);
#define NCmkstemp(buf) mkstemp(buf);
#define NCcwd(buf, len) getcwd(buf,len)
#define NCrmdir(path) rmdir(path)
#define NCunlink(path) unlink(path)
#ifdef HAVE_SYS_STAT_H
#define NCstat(path,buf) stat(path,buf)
#endif
@ -20,18 +20,23 @@ and accessing rc files (e.g. .daprc).
#define NCRCENVIGNORE "NCRCENV_IGNORE"
#define NCRCENVRC "NCRCENV_RC"

/* Known .aws profile keys */
#define AWS_ACCESS_KEY_ID "aws_access_key_id"
#define AWS_SECRET_ACCESS_KEY "aws_secret_access_key"
#define AWS_REGION "aws_region"

typedef struct NCTriple {
typedef struct NCRCentry {
char* host; /* combined host:port */
char* path; /* prefix to match or NULL */
char* key;
char* value;
} NCTriple;
} NCRCentry;

/* collect all the relevant info around the rc file */
typedef struct NCRCinfo {
int ignore; /* if 1, then do not use any rc file */
int loaded; /* 1 => already loaded */
NClist* triples; /* the rc file triple store fields*/
NClist* entries; /* the rc file entry store fields*/
char* rcfile; /* specified rcfile; overrides anything else */
} NCRCinfo;

@ -45,21 +50,38 @@ typedef struct NCRCglobalstate {
struct GlobalZarr { /* Zarr specific parameters */
char dimension_separator;
} zarr;
struct S3credentials {
NClist* profiles; /* NClist<struct AWSprofile*> */
} s3creds;
} NCRCglobalstate;

struct AWSprofile {
char* name;
NClist* entries; /* NClist<struct AWSentry*> */
};

struct AWSentry {
char* key;
char* value;
};

#if defined(__cplusplus)
extern "C" {
#endif

/* From drc.c */
EXTERNL int ncrc_createglobalstate(void);
EXTERNL void ncrc_initialize(void);
EXTERNL void ncrc_freeglobalstate(void);
/* read and compile the rc file, if any */
EXTERNL int NC_rcload(void);
EXTERNL char* NC_rclookup(const char* key, const char* hostport);
EXTERNL int NC_rcfile_insert(const char* key, const char* value, const char* hostport);
EXTERNL int NC_rcfile_insert(const char* key, const char* value, const char* hostport, const char* path);
EXTERNL char* NC_rclookup(const char* key, const char* hostport, const char* path);
EXTERNL char* NC_rclookupx(NCURI* uri, const char* key);

/* Following are primarily for debugging */
/* Obtain the count of number of triples */
/* Obtain the count of number of entries */
EXTERNL size_t NC_rcfile_length(NCRCinfo*);
/* Obtain the ith triple; return NULL if out of range */
EXTERNL NCTriple* NC_rcfile_ith(NCRCinfo*,size_t);
/* Obtain the ith entry; return NULL if out of range */
EXTERNL NCRCentry* NC_rcfile_ith(NCRCinfo*,size_t);

/* For internal use */
EXTERNL NCRCglobalstate* ncrc_getglobalstate(void);
@ -75,4 +97,16 @@ EXTERNL int NC_writefile(const char* filename, size_t size, void* content);
EXTERNL char* NC_mktmp(const char* base);
EXTERNL int NC_getmodelist(const char* url, NClist** modelistp);
EXTERNL int NC_testmode(const char* path, const char* tag);
EXTERNL int NC_split_delim(const char* path, char delim, NClist* segments);
EXTERNL int NC_s3urlrebuild(NCURI* url, NCURI** newurlp, char** bucketp, char** regionp);
EXTERNL int NC_getactives3profile(NCURI* uri, const char** profilep);
EXTERNL int NC_getdefaults3region(NCURI* uri, const char** regionp);
/* S3 profiles */
EXTERNL int NC_authgets3profile(const char* profile, struct AWSprofile** profilep);
EXTERNL int NC_s3profilelookup(const char* profile, const char* key, const char** valuep);

#if defined(__cplusplus)
}
#endif

#endif /*NCRC_H*/
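An illustrative sketch of the extended rc and profile lookups declared above; the host:port, the NULL path argument, and the "default" profile name are hypothetical examples, not values prescribed by this change.

#include <stdio.h>
#include "netcdf.h"
#include "ncrc.h"

/* Sketch only: NC_rclookup() gains an optional path prefix, and parsed .aws profiles are queryable */
static void
example_rc_and_profiles(void)
{
    char* v;
    const char* region = NULL;

    ncrc_initialize(); /* load the rc files once */
    v = NC_rclookup("HTTP.SSL.VERIFYPEER", "example.host.org:8080", NULL); /* NULL path keeps the old behavior */
    if(v != NULL) printf("verifypeer=%s\n", v);
    if(NC_s3profilelookup("default", AWS_REGION, &region) == NC_NOERR && region != NULL)
        printf("default profile region=%s\n", region);
}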
@ -67,6 +67,9 @@ EXTERNL void ncurifree(NCURI* ncuri);
/* Replace the protocol */
EXTERNL int ncurisetprotocol(NCURI*,const char* newprotocol);

/* Replace the host */
EXTERNL int ncurisethost(NCURI*,const char* newhost);

/* Replace the path */
EXTERNL int ncurisetpath(NCURI*,const char* newpath);

@ -112,6 +115,9 @@ EXTERNL char* ncuriencodeonly(const char* s, const char* allowable);
/* Encode user or pwd */
EXTERNL char* ncuriencodeuserpwd(const char* s);

/* Deep clone a uri */
EXTERNL NCURI* ncuriclone(NCURI*);

#if defined(_CPLUSPLUS_) || defined(__CPLUSPLUS__) || defined(__CPLUSPLUS)
}
#endif
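A brief hypothetical sketch of the new deep-clone entry point above, which lets a URL be rewritten without disturbing the caller's copy; the replacement path is an arbitrary example.

#include "ncuri.h"

/* Sketch only: caller frees the result with ncurifree() */
static NCURI*
example_rewrite_path(NCURI* uri)
{
    NCURI* u2 = ncuriclone(uri);             /* deep copy, safe to modify */
    if(u2 != NULL)
        ncurisetpath(u2, "/altered/prefix"); /* illustrative new path */
    return u2;
}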
@ -4,7 +4,7 @@
# University Corporation for Atmospheric Research/Unidata.

# See netcdf-c/COPYRIGHT file for more info.
SET(dap4_SOURCES d4curlfunctions.c d4fix.c d4data.c d4file.c d4parser.c d4meta.c d4varx.c d4dump.c d4swap.c d4chunk.c d4printer.c d4read.c d4http.c d4util.c d4odom.c d4cvt.c d4debug.c ncd4dispatch.c ezxml_extra.c ezxml.c)
SET(dap4_SOURCES d4curlfunctions.c d4fix.c d4data.c d4file.c d4parser.c d4meta.c d4varx.c d4dump.c d4swap.c d4chunk.c d4printer.c d4read.c d4http.c d4util.c d4odom.c d4cvt.c d4debug.c ncd4dispatch.c)

add_library(dap4 OBJECT ${dap4_SOURCES})
@ -35,9 +35,7 @@ d4util.c \
d4odom.c \
d4cvt.c \
d4debug.c \
ncd4dispatch.c \
ezxml_extra.c \
ezxml.c
ncd4dispatch.c

HDRS= \
ncd4dispatch.h \
@ -51,8 +49,7 @@ d4util.h \
d4debug.h \
d4odom.h \
d4bytes.h \
d4includes.h \
ezxml.h
d4includes.h

if ENABLE_DAP4
if USE_NETCDF4
@ -66,14 +63,3 @@ libdap4_la_CPPFLAGS = $(AM_CPPFLAGS)
libdap4_la_LIBADD =

endif # ENABLE_DAP4

# Show what is needed to insert a new version of ezxml
# primary fix: The original ezxml.[ch] uses '//' comments;
# unpack and replace with '/*..*/'
EZXML=ezxml-0.8.6.tar.gz
ezxml::
	rm -fr ./ezxml ./ezxml.[ch] ./license.txt
	tar -zxf ./${EZXML}
	sed -e 's|//\(.*\)|/*\1*/|' <ezxml/ezxml.c >./ezxml.c
	sed -e 's|//\(.*\)|/*\1*/|' <ezxml/ezxml.h >./ezxml.h
	cp ezxml/license.txt .
@ -319,7 +319,7 @@ NCD4_get_rcproperties(NCD4INFO* state)
ncerror err = NC_NOERR;
char* option = NULL;
#ifdef HAVE_CURLOPT_BUFFERSIZE
option = NC_rclookup(D4BUFFERSIZE,state->uri->uri);
option = NC_rclookup(D4BUFFERSIZE,state->uri->uri,NULL);
if(option != NULL && strlen(option) != 0) {
long bufsize;
if(strcasecmp(option,"max")==0)
@ -330,7 +330,7 @@ NCD4_get_rcproperties(NCD4INFO* state)
}
#endif
#ifdef HAVE_CURLOPT_KEEPALIVE
option = NC_rclookup(D4KEEPALIVE,state->uri->uri);
option = NC_rclookup(D4KEEPALIVE,state->uri->uri,NULL);
if(option != NULL && strlen(option) != 0) {
/* The keepalive value is of the form 0 or n/m,
where n is the idle time and m is the interval time;
@ -171,7 +171,7 @@ NCD4_parse(NCD4meta* metadata)
|
||||
if(parser == NULL) {ret=NC_ENOMEM; goto done;}
|
||||
parser->metadata = metadata;
|
||||
ilen = strlen(parser->metadata->serial.dmr);
|
||||
dom = ezxml_parse_str(parser->metadata->serial.dmr,ilen);
|
||||
dom = nc_ezxml_parse_str(parser->metadata->serial.dmr,ilen);
|
||||
if(dom == NULL) {ret=NC_ENOMEM; goto done;}
|
||||
parser->types = nclistnew();
|
||||
parser->dims = nclistnew();
|
||||
@ -185,7 +185,7 @@ NCD4_parse(NCD4meta* metadata)
|
||||
|
||||
done:
|
||||
if(dom != NULL)
|
||||
ezxml_free(dom);
|
||||
nc_ezxml_free(dom);
|
||||
reclaimParser(parser);
|
||||
return THROW(ret);
|
||||
}
|
||||
@ -227,11 +227,11 @@ traverse(NCD4parser* parser, ezxml_t dom)
|
||||
parser->metadata->root->meta.id = parser->metadata->ncid;
|
||||
parser->metadata->groupbyid = nclistnew();
|
||||
SETNAME(parser->metadata->root,"/");
|
||||
xattr = ezxml_attr(dom,"name");
|
||||
xattr = nc_ezxml_attr(dom,"name");
|
||||
if(xattr != NULL) parser->metadata->root->group.datasetname = strdup(xattr);
|
||||
xattr = ezxml_attr(dom,"dapVersion");
|
||||
xattr = nc_ezxml_attr(dom,"dapVersion");
|
||||
if(xattr != NULL) parser->metadata->root->group.dapversion = strdup(xattr);
|
||||
xattr = ezxml_attr(dom,"dmrVersion");
|
||||
xattr = nc_ezxml_attr(dom,"dmrVersion");
|
||||
if(xattr != NULL) parser->metadata->root->group.dmrversion = strdup(xattr);
|
||||
/* Recursively walk the tree */
|
||||
if((ret = fillgroup(parser,parser->metadata->root,dom))) goto done;
|
||||
@ -271,15 +271,15 @@ parseDimensions(NCD4parser* parser, NCD4node* group, ezxml_t xml)
|
||||
{
|
||||
int ret = NC_NOERR;
|
||||
ezxml_t x;
|
||||
for(x=ezxml_child(xml, "Dimension");x != NULL;x = ezxml_next(x)) {
|
||||
for(x=nc_ezxml_child(xml, "Dimension");x != NULL;x = nc_ezxml_next(x)) {
|
||||
NCD4node* dimnode = NULL;
|
||||
unsigned long long size;
|
||||
const char* sizestr;
|
||||
const char* unlimstr;
|
||||
sizestr = ezxml_attr(x,"size");
|
||||
sizestr = nc_ezxml_attr(x,"size");
|
||||
if(sizestr == NULL)
|
||||
FAIL(NC_EDIMSIZE,"Dimension has no size");
|
||||
unlimstr = ezxml_attr(x,UCARTAGUNLIM);
|
||||
unlimstr = nc_ezxml_attr(x,UCARTAGUNLIM);
|
||||
if((ret = parseULL(sizestr,&size))) goto done;
|
||||
if((ret=makeNode(parser,group,x,NCD4_DIM,NC_NULL,&dimnode))) goto done;
|
||||
dimnode->dim.size = (long long)size;
|
||||
@ -298,10 +298,10 @@ parseEnumerations(NCD4parser* parser, NCD4node* group, ezxml_t xml)
|
||||
int ret = NC_NOERR;
|
||||
ezxml_t x;
|
||||
|
||||
for(x=ezxml_child(xml, "Enumeration");x != NULL;x = ezxml_next(x)) {
|
||||
for(x=nc_ezxml_child(xml, "Enumeration");x != NULL;x = nc_ezxml_next(x)) {
|
||||
NCD4node* node = NULL;
|
||||
NCD4node* basetype = NULL;
|
||||
const char* fqn = ezxml_attr(x,"basetype");
|
||||
const char* fqn = nc_ezxml_attr(x,"basetype");
|
||||
basetype = lookupFQN(parser,fqn,NCD4_TYPE);
|
||||
if(basetype == NULL) {
|
||||
FAIL(NC_EBADTYPE,"Enumeration has unknown type: ",fqn);
|
||||
@ -314,7 +314,7 @@ parseEnumerations(NCD4parser* parser, NCD4node* group, ezxml_t xml)
|
||||
classify(group,node);
|
||||
/* Finally, see if this type has UCARTAGORIGTYPE xml attribute */
|
||||
if(parser->metadata->controller->controls.translation == NCD4_TRANSNC4) {
|
||||
const char* typetag = ezxml_attr(x,UCARTAGORIGTYPE);
|
||||
const char* typetag = nc_ezxml_attr(x,UCARTAGORIGTYPE);
|
||||
if(typetag != NULL) {
|
||||
}
|
||||
}
|
||||
@ -330,14 +330,14 @@ parseEconsts(NCD4parser* parser, NCD4node* en, ezxml_t xml)
|
||||
ezxml_t x;
|
||||
NClist* econsts = nclistnew();
|
||||
|
||||
for(x=ezxml_child(xml, "EnumConst");x != NULL;x = ezxml_next(x)) {
|
||||
for(x=nc_ezxml_child(xml, "EnumConst");x != NULL;x = nc_ezxml_next(x)) {
|
||||
NCD4node* ec = NULL;
|
||||
const char* name;
|
||||
const char* svalue;
|
||||
name = ezxml_attr(x,"name");
|
||||
name = nc_ezxml_attr(x,"name");
|
||||
if(name == NULL) FAIL(NC_EBADNAME,"Enum const with no name");
|
||||
if((ret=makeNode(parser,en,x,NCD4_ECONST,NC_NULL,&ec))) goto done ;
|
||||
svalue = ezxml_attr(x,"value");
|
||||
svalue = nc_ezxml_attr(x,"value");
|
||||
if(svalue == NULL)
|
||||
FAIL(NC_EINVAL,"Enumeration Constant has no value");
|
||||
if((ret=convertString(&ec->en.ecvalue,en->basetype,svalue)))
|
||||
@ -440,7 +440,7 @@ parseStructure(NCD4parser* parser, NCD4node* container, ezxml_t xml, NCD4node**
|
||||
|
||||
/* See if this var has UCARTAGORIGTYPE attribute */
|
||||
if(parser->metadata->controller->controls.translation == NCD4_TRANSNC4) {
|
||||
const char* typetag = ezxml_attr(xml,UCARTAGORIGTYPE);
|
||||
const char* typetag = nc_ezxml_attr(xml,UCARTAGORIGTYPE);
|
||||
if(typetag != NULL) {
|
||||
/* yes, place it on the type */
|
||||
if((ret=addOrigType(parser,var,type,typetag))) goto done;
|
||||
@ -524,7 +524,7 @@ parseSequence(NCD4parser* parser, NCD4node* container, ezxml_t xml, NCD4node** n
|
||||
Test: UCARTAGVLEN xml attribute is set
|
||||
*/
|
||||
if(parser->metadata->controller->controls.translation == NCD4_TRANSNC4) {
|
||||
const char* vlentag = ezxml_attr(xml,UCARTAGVLEN);
|
||||
const char* vlentag = nc_ezxml_attr(xml,UCARTAGVLEN);
|
||||
if(vlentag != NULL)
|
||||
usevlen = 1;
|
||||
} else
|
||||
@ -581,7 +581,7 @@ parseSequence(NCD4parser* parser, NCD4node* container, ezxml_t xml, NCD4node** n
|
||||
|
||||
/* See if this var has UCARTAGORIGTYPE attribute */
|
||||
if(parser->metadata->controller->controls.translation == NCD4_TRANSNC4) {
|
||||
const char* typetag = ezxml_attr(xml,UCARTAGORIGTYPE);
|
||||
const char* typetag = nc_ezxml_attr(xml,UCARTAGORIGTYPE);
|
||||
if(typetag != NULL) {
|
||||
/* yes, place it on the type */
|
||||
if((ret=addOrigType(parser,var,vlentype,typetag))) goto done;
|
||||
@ -599,9 +599,9 @@ parseGroups(NCD4parser* parser, NCD4node* parent, ezxml_t xml)
|
||||
{
|
||||
int ret = NC_NOERR;
|
||||
ezxml_t x;
|
||||
for(x=ezxml_child(xml, "Group");x != NULL;x = ezxml_next(x)) {
|
||||
for(x=nc_ezxml_child(xml, "Group");x != NULL;x = nc_ezxml_next(x)) {
|
||||
NCD4node* group = NULL;
|
||||
const char* name = ezxml_attr(x,"name");
|
||||
const char* name = nc_ezxml_attr(x,"name");
|
||||
if(name == NULL) FAIL(NC_EBADNAME,"Group has no name");
|
||||
if((ret=makeNode(parser,parent,x,NCD4_GROUP,NC_NULL,&group))) goto done;
|
||||
group->group.varbyid = nclistnew();
|
||||
@ -633,7 +633,7 @@ parseAtomicVar(NCD4parser* parser, NCD4node* container, ezxml_t xml, NCD4node**
|
||||
group = NCD4_groupFor(container);
|
||||
/* Locate its basetype; handle opaque and enum separately */
|
||||
if(info->subsort == NC_ENUM) {
|
||||
const char* enumfqn = ezxml_attr(xml,"enum");
|
||||
const char* enumfqn = nc_ezxml_attr(xml,"enum");
|
||||
if(enumfqn == NULL)
|
||||
base = NULL;
|
||||
else
|
||||
@ -654,7 +654,7 @@ parseAtomicVar(NCD4parser* parser, NCD4node* container, ezxml_t xml, NCD4node**
|
||||
if((ret = parseMetaData(parser,node,xml))) goto done;
|
||||
/* See if this var has UCARTAGORIGTYPE attribute */
|
||||
if(parser->metadata->controller->controls.translation == NCD4_TRANSNC4) {
|
||||
const char* typetag = ezxml_attr(xml,UCARTAGORIGTYPE);
|
||||
const char* typetag = nc_ezxml_attr(xml,UCARTAGORIGTYPE);
|
||||
if(typetag != NULL) {
|
||||
/* yes, place it on the type */
|
||||
if((ret=addOrigType(parser,node,node,typetag))) goto done;
|
||||
@ -670,18 +670,18 @@ parseDimRefs(NCD4parser* parser, NCD4node* var, ezxml_t xml)
|
||||
{
|
||||
int ret = NC_NOERR;
|
||||
ezxml_t x;
|
||||
for(x=ezxml_child(xml, "Dim");x!= NULL;x=ezxml_next(x)) {
|
||||
for(x=nc_ezxml_child(xml, "Dim");x!= NULL;x=nc_ezxml_next(x)) {
|
||||
NCD4node* dim = NULL;
|
||||
const char* fqn;
|
||||
|
||||
fqn = ezxml_attr(x,"name");
|
||||
fqn = nc_ezxml_attr(x,"name");
|
||||
if(fqn != NULL) {
|
||||
dim = lookupFQN(parser,fqn,NCD4_DIM);
|
||||
if(dim == NULL) {
|
||||
FAIL(NC_EBADDIM,"Cannot locate dim with name: %s",fqn);
|
||||
}
|
||||
} else {
|
||||
const char* sizestr = ezxml_attr(x,"size");
|
||||
const char* sizestr = nc_ezxml_attr(x,"size");
|
||||
if(sizestr == NULL) {
|
||||
FAIL(NC_EBADDIM,"Dimension reference has no name and no size");
|
||||
}
|
||||
@ -702,9 +702,9 @@ parseMaps(NCD4parser* parser, NCD4node* var, ezxml_t xml)
|
||||
int ret = NC_NOERR;
|
||||
ezxml_t x;
|
||||
|
||||
for(x=ezxml_child(xml, "Map");x!= NULL;x=ezxml_next(x)) {
|
||||
for(x=nc_ezxml_child(xml, "Map");x!= NULL;x=nc_ezxml_next(x)) {
|
||||
const char* fqn;
|
||||
fqn = ezxml_attr(x,"name");
|
||||
fqn = nc_ezxml_attr(x,"name");
|
||||
if(fqn == NULL)
|
||||
FAIL(NC_ENOTVAR,"<Map> has no name attribute");
|
||||
PUSH(var->mapnames,strdup(fqn));
|
||||
@ -724,7 +724,7 @@ parseAttributes(NCD4parser* parser, NCD4node* container, ezxml_t xml)
|
||||
{
|
||||
int count = 0;
|
||||
const char** all = NULL;
|
||||
all = ezxml_all_attr(xml,&count);
|
||||
all = nc_ezxml_all_attr(xml,&count);
|
||||
if(all != NULL && count > 0) {
|
||||
const char** p;
|
||||
container->xmlattributes = nclistnew();
|
||||
@ -737,9 +737,9 @@ parseAttributes(NCD4parser* parser, NCD4node* container, ezxml_t xml)
|
||||
}
|
||||
}
|
||||
|
||||
for(x=ezxml_child(xml, "Attribute");x!= NULL;x=ezxml_next(x)) {
|
||||
const char* name = ezxml_attr(x,"name");
|
||||
const char* type = ezxml_attr(x,"type");
|
||||
for(x=nc_ezxml_child(xml, "Attribute");x!= NULL;x=nc_ezxml_next(x)) {
|
||||
const char* name = nc_ezxml_attr(x,"name");
|
||||
const char* type = nc_ezxml_attr(x,"type");
|
||||
NCD4node* attr = NULL;
|
||||
NCD4node* basetype;
|
||||
|
||||
@ -779,24 +779,24 @@ done:
|
||||
static int
|
||||
parseError(NCD4parser* parser, ezxml_t errxml)
|
||||
{
|
||||
const char* shttpcode = ezxml_attr(errxml,"httpcode");
|
||||
const char* shttpcode = nc_ezxml_attr(errxml,"httpcode");
|
||||
ezxml_t x;
|
||||
if(shttpcode == NULL) shttpcode = "400";
|
||||
if(sscanf(shttpcode,"%d",&parser->metadata->error.httpcode) != 1)
|
||||
nclog(NCLOGERR,"Malformed <ERROR> response");
|
||||
x=ezxml_child(errxml, "Message");
|
||||
x=nc_ezxml_child(errxml, "Message");
|
||||
if(x != NULL) {
|
||||
const char* txt = ezxml_txt(x);
|
||||
const char* txt = nc_ezxml_txt(x);
|
||||
parser->metadata->error.message = (txt == NULL ? NULL : strdup(txt));
|
||||
}
|
||||
x=ezxml_child(errxml, "Context");
|
||||
x = nc_ezxml_child(errxml, "Context");
|
||||
if(x != NULL) {
|
||||
const char* txt = ezxml_txt(x);
|
||||
const char* txt = nc_ezxml_txt(x);
|
||||
parser->metadata->error.context = (txt == NULL ? NULL : strdup(txt));
|
||||
}
|
||||
x=ezxml_child(errxml, "OtherInformation");
|
||||
x=nc_ezxml_child(errxml, "OtherInformation");
|
||||
if(x != NULL) {
|
||||
const char* txt = ezxml_txt(x);
|
||||
const char* txt = nc_ezxml_txt(x);
|
||||
parser->metadata->error.otherinfo = (txt == NULL ? NULL : strdup(txt));
|
||||
}
|
||||
return THROW(NC_NOERR);
|
||||
@ -820,7 +820,7 @@ getOpaque(NCD4parser* parser, ezxml_t varxml, NCD4node* group)
|
||||
#endif
|
||||
if(parser->metadata->controller->controls.translation == NCD4_TRANSNC4) {
|
||||
/* See if this var has UCARTAGOPAQUE attribute */
|
||||
xattr = ezxml_attr(varxml,UCARTAGOPAQUE);
|
||||
xattr = nc_ezxml_attr(varxml,UCARTAGOPAQUE);
|
||||
if(xattr != NULL) {
|
||||
long long tmp = 0;
|
||||
if((ret = parseLL(xattr,&tmp)) || (tmp < 0))
|
||||
@ -865,18 +865,18 @@ getValueStrings(NCD4parser* parser, NCD4node* type, ezxml_t xattr, NClist* svalu
|
||||
{
|
||||
const char* s;
|
||||
/* See first if we have a "value" xml attribute */
|
||||
s = ezxml_attr(xattr,"value");
|
||||
s = nc_ezxml_attr(xattr,"value");
|
||||
if(s != NULL)
|
||||
PUSH(svalues,strdup(s));
|
||||
else {/* look for <Value> subnodes */
|
||||
ezxml_t x;
|
||||
for(x=ezxml_child(xattr, "Value");x != NULL;x = ezxml_next(x)) {
|
||||
for(x=nc_ezxml_child(xattr, "Value");x != NULL;x = nc_ezxml_next(x)) {
|
||||
char* es;
|
||||
char* ds;
|
||||
/* We assume that either their is a single xml attribute called "value",
|
||||
or there is a single chunk of text containing possibly multiple values.
|
||||
*/
|
||||
s = ezxml_attr(x,"value");
|
||||
s = nc_ezxml_attr(x,"value");
|
||||
if(s == NULL) {/* See if there is a text part. */
|
||||
s = x->txt;
|
||||
if(s == NULL) s = "";
|
||||
@ -1242,7 +1242,7 @@ makeNode(NCD4parser* parser, NCD4node* parent, ezxml_t xml, NCD4sort sort, nc_ty
|
||||
|
||||
/* Set node name, if it exists */
|
||||
if(xml != NULL) {
|
||||
const char* name = ezxml_attr(xml,"name");
|
||||
const char* name = nc_ezxml_attr(xml,"name");
|
||||
if(name != NULL) {
|
||||
if(strlen(name) > NC_MAX_NAME) {
|
||||
nclog(NCLOGERR,"Name too long: %s",name);
|
||||
|
@ -68,8 +68,6 @@ NCD4_initialize(void)
#endif
/* Init global state */
globalinit();
/* Load rc file */
NC_rcload();

return THROW(NC_NOERR);
}
@ -5,7 +5,7 @@

# See netcdf-c/COPYRIGHT file for more info.
SET(libdispatch_SOURCES dparallel.c dcopy.c dfile.c ddim.c datt.c dattinq.c dattput.c dattget.c derror.c dvar.c dvarget.c dvarput.c dvarinq.c ddispatch.c nclog.c dstring.c dutf8.c dinternal.c doffsets.c ncuri.c nclist.c ncbytes.c nchashmap.c nctime.c nc.c nclistmgr.c utf8proc.h utf8proc.c dpathmgr.c dutil.c drc.c dauth.c dreadonly.c dnotnc4.c dnotnc3.c daux.c dinfermodel.c
dcrc32.c dcrc32.h dcrc64.c ncexhash.c ncxcache.c ncjson.c)
dcrc32.c dcrc32.h dcrc64.c ncexhash.c ncxcache.c ncjson.c ezxml.c ezxml_extra.c)

# Netcdf-4 only functions. Must be defined even if not used
SET(libdispatch_SOURCES ${libdispatch_SOURCES} dgroup.c dvlen.c dcompound.c dtype.c denum.c dopaque.c dfilter.c)
@ -20,8 +20,9 @@ dattinq.c dattput.c dattget.c derror.c dvar.c dvarget.c dvarput.c \
dvarinq.c dinternal.c ddispatch.c dutf8.c nclog.c dstring.c ncuri.c \
nclist.c ncbytes.c nchashmap.c nctime.c nc.c nclistmgr.c dauth.c \
doffsets.c dpathmgr.c dutil.c dreadonly.c dnotnc4.c dnotnc3.c \
daux.c dinfermodel.c \
dcrc32.c dcrc32.h dcrc64.c ncexhash.c ncxcache.c ncjson.c
daux.c dinfermodel.c \
dcrc32.c dcrc32.h dcrc64.c ncexhash.c ncxcache.c ncjson.c \
ezxml.c ezxml_extra.c

# Add the utf8 codebase
libdispatch_la_SOURCES += utf8proc.c utf8proc.h
@ -68,3 +69,14 @@ ncsettings.c: $(top_srcdir)/libnetcdf.settings ncsettings.hdr
	sed -e 's/\(.*\)/\"\1\\n\"/' | \
	cat >> ncsettings.c
	echo ';' >> ncsettings.c

# Show what is needed to insert a new version of ezxml
# primary fix: The original ezxml.[ch] uses '//' comments;
# unpack and replace with '/*..*/'
EZXML=ezxml-0.8.6.tar.gz
ezxml::
	rm -fr ./ezxml ./ezxml.[ch] ./license.txt
	tar -zxf ./${EZXML}
	sed -e 's|//\(.*\)|/*\1*/|' <ezxml/ezxml.c >./ezxml.c
	sed -e 's|//\(.*\)|/*\1*/|' <ezxml/ezxml.h >./ezxml.h
	cp ezxml/license.txt .
@ -25,6 +25,11 @@ See COPYRIGHT for license information.
|
||||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
#include "ncrc.h"
|
||||
|
||||
|
||||
#define DEBUG
|
||||
|
||||
#undef MEMCHECK
|
||||
#define MEMCHECK(x) if((x)==NULL) {goto nomem;} else {}
|
||||
|
||||
@ -97,9 +102,9 @@ NC_authsetup(NCauth** authp, NCURI* uri)
|
||||
if(uri != NULL)
|
||||
uri_hostport = NC_combinehostport(uri);
|
||||
else
|
||||
return NC_EDAP; /* Generic EDAP error. */
|
||||
{ret = NC_EDAP; goto done;} /* Generic EDAP error. */
|
||||
if((auth=calloc(1,sizeof(NCauth)))==NULL)
|
||||
return NC_ENOMEM;
|
||||
{ret = NC_ENOMEM; goto done;}
|
||||
|
||||
setdefaults(auth);
|
||||
|
||||
@ -109,50 +114,46 @@ NC_authsetup(NCauth** authp, NCURI* uri)
|
||||
*/
|
||||
|
||||
setauthfield(auth,"HTTP.DEFLATE",
|
||||
NC_rclookup("HTTP.DEFLATE",uri_hostport));
|
||||
NC_rclookup("HTTP.DEFLATE",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.VERBOSE",
|
||||
NC_rclookup("HTTP.VERBOSE",uri_hostport));
|
||||
NC_rclookup("HTTP.VERBOSE",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.TIMEOUT",
|
||||
NC_rclookup("HTTP.TIMEOUT",uri_hostport));
|
||||
NC_rclookup("HTTP.TIMEOUT",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.CONNECTTIMEOUT",
|
||||
NC_rclookup("HTTP.CONNECTTIMEOUT",uri_hostport));
|
||||
NC_rclookup("HTTP.CONNECTTIMEOUT",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.USERAGENT",
|
||||
NC_rclookup("HTTP.USERAGENT",uri_hostport));
|
||||
NC_rclookup("HTTP.USERAGENT",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.COOKIEFILE",
|
||||
NC_rclookup("HTTP.COOKIEFILE",uri_hostport));
|
||||
NC_rclookup("HTTP.COOKIEFILE",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.COOKIE_FILE",
|
||||
NC_rclookup("HTTP.COOKIE_FILE",uri_hostport));
|
||||
NC_rclookup("HTTP.COOKIE_FILE",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.COOKIEJAR",
|
||||
NC_rclookup("HTTP.COOKIEJAR",uri_hostport));
|
||||
NC_rclookup("HTTP.COOKIEJAR",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.COOKIE_JAR",
|
||||
NC_rclookup("HTTP.COOKIE_JAR",uri_hostport));
|
||||
NC_rclookup("HTTP.COOKIE_JAR",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.PROXY.SERVER",
|
||||
NC_rclookup("HTTP.PROXY.SERVER",uri_hostport));
|
||||
NC_rclookup("HTTP.PROXY.SERVER",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.PROXY_SERVER",
|
||||
NC_rclookup("HTTP.PROXY_SERVER",uri_hostport));
|
||||
NC_rclookup("HTTP.PROXY_SERVER",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.SSL.CERTIFICATE",
|
||||
NC_rclookup("HTTP.SSL.CERTIFICATE",uri_hostport));
|
||||
NC_rclookup("HTTP.SSL.CERTIFICATE",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.SSL.KEY",
|
||||
NC_rclookup("HTTP.SSL.KEY",uri_hostport));
|
||||
NC_rclookup("HTTP.SSL.KEY",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.SSL.KEYPASSWORD",
|
||||
NC_rclookup("HTTP.SSL.KEYPASSWORD",uri_hostport));
|
||||
NC_rclookup("HTTP.SSL.KEYPASSWORD",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.SSL.CAINFO",
|
||||
NC_rclookup("HTTP.SSL.CAINFO",uri_hostport));
|
||||
NC_rclookup("HTTP.SSL.CAINFO",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.SSL.CAPATH",
|
||||
NC_rclookup("HTTP.SSL.CAPATH",uri_hostport));
|
||||
NC_rclookup("HTTP.SSL.CAPATH",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.SSL.VERIFYPEER",
|
||||
NC_rclookup("HTTP.SSL.VERIFYPEER",uri_hostport));
|
||||
NC_rclookup("HTTP.SSL.VERIFYPEER",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.SSL.VERIFYHOST",
|
||||
NC_rclookup("HTTP.SSL.VERIFYHOST",uri_hostport));
|
||||
NC_rclookup("HTTP.SSL.VERIFYHOST",uri_hostport,uri->path));
|
||||
/* Alias for VERIFYHOST + VERIFYPEER */
|
||||
setauthfield(auth,"HTTP.SSL.VALIDATE",
|
||||
NC_rclookup("HTTP.SSL.VALIDATE",uri_hostport));
|
||||
NC_rclookup("HTTP.SSL.VALIDATE",uri_hostport,uri->path));
|
||||
setauthfield(auth,"HTTP.NETRC",
|
||||
NC_rclookup("HTTP.NETRC",uri_hostport));
|
||||
setauthfield(auth,"HTTP.S3.ACCESSID",
|
||||
NC_rclookup("HTTP.S3.ACCESSID",uri_hostport));
|
||||
setauthfield(auth,"HTTP.S3.SECRETKEY",
|
||||
NC_rclookup("HTTP.S3.SECRETKEY",uri_hostport));
|
||||
NC_rclookup("HTTP.NETRC",uri_hostport,uri->path));
|
||||
|
||||
{ /* Handle various cases for user + password */
|
||||
/* First, see if the user+pwd was in the original url */
|
||||
@ -162,27 +163,30 @@ NC_authsetup(NCauth** authp, NCURI* uri)
|
||||
user = uri->user;
|
||||
pwd = uri->password;
|
||||
} else {
|
||||
user = NC_rclookup("HTTP.CREDENTIALS.USER",uri_hostport);
|
||||
pwd = NC_rclookup("HTTP.CREDENTIALS.PASSWORD",uri_hostport);
|
||||
user = NC_rclookup("HTTP.CREDENTIALS.USER",uri_hostport,uri->path);
|
||||
pwd = NC_rclookup("HTTP.CREDENTIALS.PASSWORD",uri_hostport,uri->path);
|
||||
}
|
||||
if(user != NULL && pwd != NULL) {
|
||||
user = strdup(user); /* so we can consistently reclaim */
|
||||
pwd = strdup(pwd);
|
||||
} else {
|
||||
/* Could not get user and pwd, so try USERPASSWORD */
|
||||
const char* userpwd = NC_rclookup("HTTP.CREDENTIALS.USERPASSWORD",uri_hostport);
|
||||
const char* userpwd = NC_rclookup("HTTP.CREDENTIALS.USERPASSWORD",uri_hostport,uri->path);
|
||||
if(userpwd != NULL) {
|
||||
ret = NC_parsecredentials(userpwd,&user,&pwd);
|
||||
if(ret) {nullfree(uri_hostport); return ret;}
|
||||
if((ret = NC_parsecredentials(userpwd,&user,&pwd))) goto done;
|
||||
}
|
||||
}
|
||||
setauthfield(auth,"HTTP.CREDENTIALS.USERNAME",user);
|
||||
setauthfield(auth,"HTTP.CREDENTIALS.PASSWORD",pwd);
|
||||
nullfree(user);
|
||||
nullfree(pwd);
|
||||
nullfree(uri_hostport);
|
||||
}
|
||||
/* Get the Default profile */
|
||||
auth->s3profile = strdup("default");
|
||||
|
||||
if(authp) {*authp = auth; auth = NULL;}
|
||||
done:
|
||||
nullfree(uri_hostport);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
@ -210,8 +214,7 @@ NC_authfree(NCauth* auth)
|
||||
nullfree(auth->proxy.pwd);
|
||||
nullfree(auth->creds.user);
|
||||
nullfree(auth->creds.pwd);
|
||||
nullfree(auth->s3creds.accessid);
|
||||
nullfree(auth->s3creds.secretkey);
|
||||
nullfree(auth->s3profile);
|
||||
nullfree(auth);
|
||||
}
|
||||
|
||||
@ -224,32 +227,32 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
if(value == NULL) goto done;
|
||||
if(strcmp(flag,"HTTP.DEFLATE")==0) {
|
||||
if(atoi(value)) auth->curlflags.compress = 1;
|
||||
#ifdef D4DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.DEFLATE: %ld", infoflags.compress);
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.DEFLATE: %ld", auth->curlflags.compress);
|
||||
#endif
|
||||
}
|
||||
if(strcmp(flag,"HTTP.VERBOSE")==0) {
|
||||
if(atoi(value)) auth->curlflags.verbose = 1;
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.VERBOSE: %ld", auth->curlflags.verbose);
|
||||
#endif
|
||||
}
|
||||
if(strcmp(flag,"HTTP.TIMEOUT")==0) {
|
||||
if(atoi(value)) auth->curlflags.timeout = atoi(value);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.TIMEOUT: %ld", auth->curlflags.timeout);
|
||||
#endif
|
||||
}
|
||||
if(strcmp(flag,"HTTP.CONNECTTIMEOUT")==0) {
|
||||
if(atoi(value)) auth->curlflags.connecttimeout = atoi(value);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.CONNECTTIMEOUT: %ld", auth->curlflags.connecttimeout);
|
||||
#endif
|
||||
}
|
||||
if(strcmp(flag,"HTTP.USERAGENT")==0) {
|
||||
if(atoi(value)) auth->curlflags.useragent = strdup(value);
|
||||
MEMCHECK(auth->curlflags.useragent);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.USERAGENT: %s", auth->curlflags.useragent);
|
||||
#endif
|
||||
}
|
||||
@ -262,14 +265,14 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
nullfree(auth->curlflags.cookiejar);
|
||||
auth->curlflags.cookiejar = strdup(value);
|
||||
MEMCHECK(auth->curlflags.cookiejar);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.COOKIEJAR: %s", auth->curlflags.cookiejar);
|
||||
#endif
|
||||
}
|
||||
if(strcmp(flag,"HTTP.PROXY.SERVER")==0 || strcmp(flag,"HTTP.PROXY_SERVER")==0) {
|
||||
ret = NC_parseproxy(auth,value);
|
||||
if(ret != NC_NOERR) goto done;
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.PROXY.SERVER: %s", value);
|
||||
#endif
|
||||
}
|
||||
@ -277,7 +280,7 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
int v;
|
||||
if((v = atol(value))) {
|
||||
auth->ssl.verifypeer = v;
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.SSL.VERIFYPEER: %d", v);
|
||||
#endif
|
||||
}
|
||||
@ -286,7 +289,7 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
int v;
|
||||
if((v = atol(value))) {
|
||||
auth->ssl.verifyhost = v;
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.SSL.VERIFYHOST: %d", v);
|
||||
#endif
|
||||
}
|
||||
@ -302,7 +305,7 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
nullfree(auth->ssl.certificate);
|
||||
auth->ssl.certificate = strdup(value);
|
||||
MEMCHECK(auth->ssl.certificate);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.SSL.CERTIFICATE: %s", auth->ssl.certificate);
|
||||
#endif
|
||||
}
|
||||
@ -311,7 +314,7 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
nullfree(auth->ssl.key);
|
||||
auth->ssl.key = strdup(value);
|
||||
MEMCHECK(auth->ssl.key);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.SSL.KEY: %s", auth->ssl.key);
|
||||
#endif
|
||||
}
|
||||
@ -320,7 +323,7 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
nullfree(auth->ssl.keypasswd) ;
|
||||
auth->ssl.keypasswd = strdup(value);
|
||||
MEMCHECK(auth->ssl.keypasswd);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.SSL.KEYPASSWORD: %s", auth->ssl.keypasswd);
|
||||
#endif
|
||||
}
|
||||
@ -329,7 +332,7 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
nullfree(auth->ssl.cainfo) ;
|
||||
auth->ssl.cainfo = strdup(value);
|
||||
MEMCHECK(auth->ssl.cainfo);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.SSL.CAINFO: %s", auth->ssl.cainfo);
|
||||
#endif
|
||||
}
|
||||
@ -338,7 +341,7 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
nullfree(auth->ssl.capath) ;
|
||||
auth->ssl.capath = strdup(value);
|
||||
MEMCHECK(auth->ssl.capath);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.SSL.CAPATH: %s", auth->ssl.capath);
|
||||
#endif
|
||||
}
|
||||
@ -346,7 +349,7 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
nullfree(auth->curlflags.netrc);
|
||||
auth->curlflags.netrc = strdup(value);
|
||||
MEMCHECK(auth->curlflags.netrc);
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DEBUG
|
||||
nclog(NCLOGNOTE,"HTTP.NETRC: %s", auth->curlflags.netrc);
|
||||
#endif
|
||||
}
|
||||
@ -362,17 +365,6 @@ setauthfield(NCauth* auth, const char* flag, const char* value)
|
||||
MEMCHECK(auth->creds.pwd);
|
||||
}
|
||||
|
||||
if(strcmp(flag,"HTTP.S3.ACCESSID")==0) {
|
||||
nullfree(auth->s3creds.accessid);
|
||||
auth->s3creds.accessid = strdup(value);
|
||||
MEMCHECK(auth->s3creds.accessid);
|
||||
}
|
||||
if(strcmp(flag,"HTTP.S3.SECRETKEY")==0) {
|
||||
nullfree(auth->s3creds.secretkey);
|
||||
auth->s3creds.secretkey = strdup(value);
|
||||
MEMCHECK(auth->s3creds.secretkey);
|
||||
}
|
||||
|
||||
done:
|
||||
return (ret);
|
||||
|
||||
|
@ -51,6 +51,7 @@ NCDISPATCH_initialize(void)
NC_stride_one[i] = 1;
}

status = ncrc_createglobalstate(); /* will allocate and clear */
globalstate = ncrc_getglobalstate(); /* will allocate and clear */

/* Capture temp dir*/
@ -93,10 +94,11 @@ NCDISPATCH_initialize(void)
globalstate->cwd = strdup(cwdbuf);
}

/* Now load RC File */
status = NC_rcload();
ncloginit();

/* Now load RC Files */
ncrc_initialize();

/* Compute type alignments */
NC_compute_alignments();
@ -24,25 +24,24 @@
|
||||
#include "nclist.h"
|
||||
#include "nchttp.h"
|
||||
|
||||
#undef VERBOSE
|
||||
#undef TRACE
|
||||
|
||||
#define CURLERR(e) reporterror(state,(e))
|
||||
|
||||
/* Mnemonics */
|
||||
#define GETCMD 0
|
||||
#define HEADCMD 1
|
||||
|
||||
#if 0
|
||||
static const char* LENGTH_ACCEPT[] = {"content-length","accept-ranges",NULL};
|
||||
#endif
|
||||
static const char* CONTENTLENGTH[] = {"content-length",NULL};
|
||||
|
||||
/* Forward */
|
||||
static int setupconn(NC_HTTP_STATE* state, const char* objecturl, NCbytes* buf);
|
||||
static int execute(NC_HTTP_STATE* state, int headcmd);
|
||||
static int setupconn(NC_HTTP_STATE* state, const char* objecturl);
|
||||
static int execute(NC_HTTP_STATE* state);
|
||||
static int headerson(NC_HTTP_STATE* state, const char** which);
|
||||
static void headersoff(NC_HTTP_STATE* state);
|
||||
static void showerrors(NC_HTTP_STATE* state);
|
||||
static int reporterror(NC_HTTP_STATE* state, CURLcode cstat);
|
||||
static int lookupheader(NC_HTTP_STATE* state, const char* key, const char** valuep);
|
||||
static int my_trace(CURL *handle, curl_infotype type, char *data, size_t size,void *userp);
|
||||
|
||||
#ifdef TRACE
|
||||
static void
|
||||
@ -65,16 +64,19 @@ Trace(const char* fcn)
|
||||
/**************************************************/
|
||||
|
||||
/**
|
||||
@param objecturl url we propose to access
|
||||
@param curlp curl handle stored here if non-NULL
|
||||
@param filelenp store length of the file here if non-NULL
|
||||
*/
|
||||
|
||||
int
|
||||
nc_http_open(const char* objecturl, NC_HTTP_STATE** statep, long long* filelenp)
|
||||
nc_http_init(NC_HTTP_STATE** statep)
|
||||
{
|
||||
return nc_http_init_verbose(statep,0);
|
||||
}
|
||||
|
||||
int
|
||||
nc_http_init_verbose(NC_HTTP_STATE** statep, int verbose)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
int i;
|
||||
NC_HTTP_STATE* state = NULL;
|
||||
|
||||
Trace("open");
|
||||
@ -85,40 +87,19 @@ nc_http_open(const char* objecturl, NC_HTTP_STATE** statep, long long* filelenp)
|
||||
state->curl = curl_easy_init();
|
||||
if (state->curl == NULL) {stat = NC_ECURL; goto done;}
|
||||
showerrors(state);
|
||||
#ifdef VERBOSE
|
||||
{long onoff = 1;
|
||||
CURLcode cstat = CURLE_OK;
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_VERBOSE, onoff));
|
||||
if(cstat != CURLE_OK)
|
||||
{stat = NC_ECURL; goto done;}
|
||||
if(verbose) {
|
||||
long onoff = 1;
|
||||
CURLcode cstat = CURLE_OK;
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_VERBOSE, onoff));
|
||||
if(cstat != CURLE_OK) {stat = NC_ECURL; goto done;}
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_DEBUGFUNCTION, my_trace));
|
||||
if(cstat != CURLE_OK) {stat = NC_ECURL; goto done;}
|
||||
}
|
||||
#endif
|
||||
if(filelenp) {
|
||||
*filelenp = -1;
|
||||
/* Attempt to get the file length using HEAD */
|
||||
if((stat = setupconn(state,objecturl,NULL))) goto done;
|
||||
if((stat = headerson(state,LENGTH_ACCEPT))) goto done;
|
||||
if((stat = execute(state,HEADCMD))) goto done;
|
||||
for(i=0;i<nclistlength(state->headers);i+=2) {
|
||||
char* s = nclistget(state->headers,i);
|
||||
if(strcasecmp(s,"content-length")==0) {
|
||||
s = nclistget(state->headers,i+1);
|
||||
sscanf(s,"%lld",filelenp);
|
||||
break;
|
||||
}
|
||||
/* Also check for the Accept-Ranges header */
|
||||
if(strcasecmp(s,"accept-ranges")==0) {
|
||||
s = nclistget(state->headers,i+1);
|
||||
if(strcasecmp(s,"bytes")!=0) /* oops! */
|
||||
{stat = NC_EURL; goto done;}
|
||||
}
|
||||
}
|
||||
headersoff(state);
|
||||
}
|
||||
stat = nc_http_reset(state);
|
||||
if(statep) {*statep = state; state = NULL;}
|
||||
|
||||
done:
|
||||
nc_http_close(state);
|
||||
if(state) nc_http_close(state);
|
||||
dbgflush();
|
||||
return stat;
|
||||
}
|
||||
@ -133,17 +114,94 @@ nc_http_close(NC_HTTP_STATE* state)
|
||||
if(state == NULL) return stat;
|
||||
if(state->curl != NULL)
|
||||
(void)curl_easy_cleanup(state->curl);
|
||||
nclistfreeall(state->headers); state->headers = NULL;
|
||||
if(state->buf != NULL)
|
||||
abort();
|
||||
nclistfreeall(state->response.headset); state->response.headset = NULL;
|
||||
nclistfreeall(state->response.headers); state->response.headers = NULL;
|
||||
ncbytesfree(state->response.buf);
|
||||
nclistfreeall(state->request.headers); state->request.headers = NULL;
|
||||
nullfree(state);
|
||||
dbgflush();
|
||||
return stat;
|
||||
}
|
||||
|
||||
/* Reset after a request */
|
||||
int
|
||||
nc_http_reset(NC_HTTP_STATE* state)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
CURLcode cstat = CURLE_OK;
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_HTTPGET, 1L));
|
||||
if(cstat != CURLE_OK) {stat = NC_ECURL; goto done;}
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_NOBODY, 0L));
|
||||
if(cstat != CURLE_OK) {stat = NC_ECURL; goto done;}
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_PUT, 0L));
|
||||
if(cstat != CURLE_OK) {stat = NC_ECURL; goto done;}
|
||||
cstat = curl_easy_setopt(state->curl, CURLOPT_CUSTOMREQUEST, NULL);
|
||||
if(cstat != CURLE_OK) {stat = NC_ECURL; goto done;}
|
||||
cstat = curl_easy_setopt(state->curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)-1);
|
||||
if(cstat != CURLE_OK) {stat = NC_ECURL; goto done;}
|
||||
state->request.method = HTTPGET;
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, NULL));
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, NULL));
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_READFUNCTION, NULL));
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_READDATA, NULL));
|
||||
headersoff(state);
|
||||
done:
|
||||
return stat;
|
||||
}
|
||||
|
||||
/**************************************************/
|
||||
/* Set misc parameters */
|
||||
|
||||
int
|
||||
nc_http_set_method(NC_HTTP_STATE* state, HTTPMETHOD method)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
CURLcode cstat = CURLE_OK;
|
||||
switch (method) {
|
||||
case HTTPGET:
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_HTTPGET, 1L));
|
||||
break;
|
||||
case HTTPHEAD:
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_HTTPGET, 1L));
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1L));
|
||||
break;
|
||||
case HTTPPUT:
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_PUT, 1L));
|
||||
break;
|
||||
case HTTPDELETE:
|
||||
cstat = curl_easy_setopt(state->curl, CURLOPT_CUSTOMREQUEST, "DELETE");
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1L));
|
||||
break;
|
||||
default: stat = NC_EINVAL; break;
|
||||
}
|
||||
if(cstat != CURLE_OK) {stat = NC_ECURL; goto done;}
|
||||
state->request.method = method;
|
||||
done:
|
||||
return stat;
|
||||
}
|
||||
|
||||
int
|
||||
nc_http_set_payload(NC_HTTP_STATE* state, size_t size, void* payload)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
state->request.payloadsize = size;
|
||||
state->request.payload = payload;
|
||||
state->request.payloadpos = 0;
|
||||
return stat;
|
||||
}
|
||||
|
||||
int
|
||||
nc_http_set_response(NC_HTTP_STATE* state, NCbytes* buf)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
state->response.buf = buf;
|
||||
return stat;
|
||||
}
|
||||
|
||||
/**************************************************/
|
||||
/**
|
||||
Assume URL etc has already been set.
|
||||
@param curl curl handle
|
||||
@param state state handle
|
||||
@param objecturl to read
|
||||
@param start starting offset
|
||||
@param count number of bytes to read
|
||||
@param buf store read data here -- caller must allocate and free
|
||||
@ -161,7 +219,8 @@ nc_http_read(NC_HTTP_STATE* state, const char* objecturl, size64_t start, size64
|
||||
if(count == 0)
|
||||
goto done; /* do not attempt to read */
|
||||
|
||||
if((stat = setupconn(state,objecturl,buf)))
|
||||
if((stat = nc_http_set_response(state,buf))) goto fail;
|
||||
if((stat = setupconn(state,objecturl)))
|
||||
goto fail;
|
||||
|
||||
/* Set to read byte range */
|
||||
@ -170,10 +229,11 @@ nc_http_read(NC_HTTP_STATE* state, const char* objecturl, size64_t start, size64
|
||||
if(cstat != CURLE_OK)
|
||||
{stat = NC_ECURL; goto done;}
|
||||
|
||||
if((stat = execute(state,GETCMD)))
|
||||
if((stat = execute(state)))
|
||||
goto done;
|
||||
done:
|
||||
state->buf = NULL;
|
||||
nc_http_reset(state);
|
||||
state->response.buf = NULL;
|
||||
dbgflush();
|
||||
return stat;
|
||||
|
||||
@ -182,6 +242,33 @@ fail:
|
||||
goto done;
|
||||
}
|
||||
|
||||
/**
|
||||
@param state state handle
|
||||
@param objecturl to write
|
||||
@param payload send as body of a PUT
|
||||
*/
|
||||
|
||||
int
|
||||
nc_http_write(NC_HTTP_STATE* state, const char* objecturl, NCbytes* payload)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
|
||||
Trace("write");
|
||||
|
||||
if((stat = nc_http_set_payload(state,ncbyteslength(payload),ncbytescontents(payload)))) goto fail;
|
||||
if((stat = nc_http_set_method(state,HTTPPUT)));
|
||||
if((stat = setupconn(state,objecturl))) goto fail;
|
||||
if((stat = execute(state)))
|
||||
goto done;
|
||||
done:
|
||||
nc_http_reset(state);
|
||||
return stat;
|
||||
|
||||
fail:
|
||||
stat = NC_ECURL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
/**
|
||||
Return length of an object.
|
||||
Assume URL etc has already been set.
|
||||
@ -191,48 +278,90 @@ Assume URL etc has already been set.
|
||||
int
|
||||
nc_http_size(NC_HTTP_STATE* state, const char* objecturl, long long* sizep)
|
||||
{
|
||||
int i,stat = NC_NOERR;
|
||||
int stat = NC_NOERR;
|
||||
const char* hdr = NULL;
|
||||
|
||||
Trace("size");
|
||||
if(sizep == NULL)
|
||||
goto done; /* do not attempt to read */
|
||||
|
||||
if((stat = setupconn(state,objecturl,NULL)))
|
||||
if((stat = nc_http_set_method(state,HTTPHEAD))) goto done;
|
||||
if((stat = setupconn(state,objecturl)))
|
||||
goto done;
|
||||
/* Make sure we get headers */
|
||||
if((stat = headerson(state,CONTENTLENGTH))) goto done;
|
||||
|
||||
state->httpcode = 200;
|
||||
if((stat = execute(state,HEADCMD)))
|
||||
if((stat = execute(state)))
|
||||
goto done;
|
||||
|
||||
if(nclistlength(state->headers) == 0)
|
||||
if(nclistlength(state->response.headers) == 0)
|
||||
{stat = NC_EURL; goto done;}
|
||||
|
||||
/* Get the content length header */
|
||||
for(i=0;i<nclistlength(state->headers);i+=2) {
|
||||
char* s = nclistget(state->headers,i);
|
||||
if(strcasecmp(s,"content-length")==0) {
|
||||
s = nclistget(state->headers,i+1);
|
||||
sscanf(s,"%llu",sizep);
|
||||
break;
|
||||
}
|
||||
if((stat = lookupheader(state,"content-length",&hdr))==NC_NOERR) {
|
||||
sscanf(hdr,"%llu",sizep);
|
||||
}
|
||||
|
||||
done:
|
||||
nc_http_reset(state);
|
||||
headersoff(state);
|
||||
dbgflush();
|
||||
return stat;
|
||||
}
|
||||
|
||||
int
|
||||
nc_http_headers(NC_HTTP_STATE* state, const NClist** headersp)
|
||||
nc_http_response_headset(NC_HTTP_STATE* state, const NClist* keys)
|
||||
{
|
||||
if(headersp) *headersp = state->headers;
|
||||
int i;
|
||||
if(keys == NULL) return NC_NOERR;
|
||||
if(state->response.headset == NULL)
|
||||
state->response.headset = nclistnew();
|
||||
for(i=0;i<nclistlength(keys);i++) {
|
||||
const char* key = (const char*)nclistget(keys,i);
|
||||
if(!nclistmatch(state->response.headset,key,0)) /* remove duplicates */
|
||||
nclistpush(state->response.headset,strdup(key));
|
||||
}
|
||||
return NC_NOERR;
|
||||
}
|
||||
|
||||
int
|
||||
nc_http_response_headers(NC_HTTP_STATE* state, NClist** headersp)
|
||||
{
|
||||
NClist* headers = NULL;
|
||||
if(headersp != NULL) {
|
||||
headers = nclistclone(state->response.headers,1);
|
||||
*headersp = headers; headers = NULL;
|
||||
}
|
||||
return NC_NOERR;
|
||||
}
|
||||
|
||||
int
|
||||
nc_http_request_setheaders(NC_HTTP_STATE* state, const NClist* headers)
|
||||
{
|
||||
nclistfreeall(state->request.headers);
|
||||
state->request.headers = nclistclone(headers,1);
|
||||
return NC_NOERR;
|
||||
}
|
||||
|
||||
/**************************************************/
|
||||
|
||||
static size_t
|
||||
ReadMemoryCallback(char* buffer, size_t size, size_t nmemb, void *data)
|
||||
{
|
||||
NC_HTTP_STATE* state = data;
|
||||
size_t transfersize = size * nmemb;
|
||||
size_t avail = (state->request.payloadsize - state->request.payloadpos);
|
||||
|
||||
Trace("ReadMemoryCallback");
|
||||
if(transfersize == 0)
|
||||
nclog(NCLOGWARN,"ReadMemoryCallback: zero sized buffer");
|
||||
if(transfersize > avail) transfersize = avail;
|
||||
memcpy(buffer,((char*)state->request.payload)+state->request.payloadpos,transfersize);
|
||||
state->request.payloadpos += transfersize;
|
||||
return transfersize;
|
||||
}
|
||||
|
||||
static size_t
|
||||
WriteMemoryCallback(void *ptr, size_t size, size_t nmemb, void *data)
|
||||
{
|
||||
@ -242,7 +371,7 @@ WriteMemoryCallback(void *ptr, size_t size, size_t nmemb, void *data)
|
||||
Trace("WriteMemoryCallback");
|
||||
if(realsize == 0)
|
||||
nclog(NCLOGWARN,"WriteMemoryCallback: zero sized chunk");
|
||||
ncbytesappendn(state->buf, ptr, realsize);
|
||||
ncbytesappendn(state->response.buf, ptr, realsize);
|
||||
return realsize;
|
||||
}
|
||||
|
||||
@ -286,7 +415,7 @@ HeaderCallback(char *buffer, size_t size, size_t nitems, void *data)
|
||||
int havecolon;
|
||||
NC_HTTP_STATE* state = data;
|
||||
int match;
|
||||
const char** hdr;
|
||||
const char* hdr;
|
||||
|
||||
Trace("HeaderCallback");
|
||||
if(realsize == 0)
|
||||
@ -300,9 +429,10 @@ HeaderCallback(char *buffer, size_t size, size_t nitems, void *data)
|
||||
name = malloc(i+1);
|
||||
memcpy(name,buffer,i);
|
||||
name[i] = '\0';
|
||||
if(state->headset != NULL) {
|
||||
for(match=0,hdr=state->headset;*hdr;hdr++) {
|
||||
if(strcasecmp(*hdr,name)==0) {match = 1; break;}
|
||||
if(state->response.headset != NULL) {
|
||||
for(match=0,i=0;i<nclistlength(state->response.headset);i++) {
|
||||
hdr = (const char*)nclistget(state->response.headset,i);
|
||||
if(strcasecmp(hdr,name)==0) {match = 1; break;}
|
||||
}
|
||||
if(!match) goto done;
|
||||
}
|
||||
@ -316,12 +446,12 @@ HeaderCallback(char *buffer, size_t size, size_t nitems, void *data)
|
||||
value[vlen] = '\0';
|
||||
trim(value);
|
||||
}
|
||||
if(state->headers == NULL)
|
||||
state->headers = nclistnew();
|
||||
nclistpush(state->headers,name);
|
||||
if(state->response.headers == NULL)
|
||||
state->response.headers = nclistnew();
|
||||
nclistpush(state->response.headers,name);
|
||||
name = NULL;
|
||||
if(value == NULL) value = strdup("");
|
||||
nclistpush(state->headers,value);
|
||||
nclistpush(state->response.headers,value);
|
||||
value = NULL;
|
||||
done:
|
||||
nullfree(name);
|
||||
@ -329,7 +459,7 @@ done:
|
||||
}
|
||||
|
||||
static int
|
||||
setupconn(NC_HTTP_STATE* state, const char* objecturl, NCbytes* buf)
|
||||
setupconn(NC_HTTP_STATE* state, const char* objecturl)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
CURLcode cstat = CURLE_OK;
|
||||
@ -352,8 +482,10 @@ setupconn(NC_HTTP_STATE* state, const char* objecturl, NCbytes* buf)
|
||||
cstat = curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1);
|
||||
if (cstat != CURLE_OK) goto fail;
|
||||
|
||||
state->buf = buf;
|
||||
if(buf != NULL) {
|
||||
/* Set the method */
|
||||
if((stat = nc_http_set_method(state,state->request.method))) goto done;
|
||||
|
||||
if(state->response.buf) {
|
||||
/* send all data to this function */
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback));
|
||||
if (cstat != CURLE_OK) goto fail;
|
||||
@ -364,7 +496,28 @@ setupconn(NC_HTTP_STATE* state, const char* objecturl, NCbytes* buf)
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, NULL));
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, NULL));
|
||||
}
|
||||
if(state->request.payloadsize > 0) {
|
||||
state->request.payloadpos = 0; /* track reading */
|
||||
/* send all data to this function */
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_READFUNCTION, ReadMemoryCallback));
|
||||
if (cstat != CURLE_OK) goto fail;
|
||||
/* Set argument for ReadMemoryCallback */
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_READDATA, (void*)state));
|
||||
if (cstat != CURLE_OK) goto fail;
|
||||
} else {/* turn off data capture */
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_READFUNCTION, NULL));
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_READDATA, NULL));
|
||||
}
|
||||
|
||||
/* Do method specific actions */
|
||||
switch(state->request.method) {
|
||||
case HTTPPUT:
|
||||
if(state->request.payloadsize > 0)
|
||||
cstat = curl_easy_setopt(state->curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)state->request.payloadsize);
|
||||
break;
|
||||
default: break;
|
||||
}
|
||||
|
||||
done:
|
||||
return stat;
|
||||
fail:
|
||||
@ -375,27 +528,17 @@ fail:
|
||||
}
|
||||
|
||||
static int
|
||||
execute(NC_HTTP_STATE* state, int headcmd)
|
||||
execute(NC_HTTP_STATE* state)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
CURLcode cstat = CURLE_OK;
|
||||
|
||||
if(headcmd) {
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1L));
|
||||
if(cstat != CURLE_OK) goto fail;
|
||||
}
|
||||
|
||||
cstat = CURLERR(curl_easy_perform(state->curl));
|
||||
if(cstat != CURLE_OK) goto fail;
|
||||
|
||||
cstat = CURLERR(curl_easy_getinfo(state->curl,CURLINFO_RESPONSE_CODE,&state->httpcode));
|
||||
if(cstat != CURLE_OK) state->httpcode = 0;
|
||||
|
||||
if(headcmd) {
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_HTTPGET, 1L));
|
||||
if(cstat != CURLE_OK) goto fail;
|
||||
}
|
||||
|
||||
done:
|
||||
return stat;
|
||||
fail:
|
||||
@ -408,11 +551,16 @@ headerson(NC_HTTP_STATE* state, const char** headset)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
CURLcode cstat = CURLE_OK;
|
||||
const char** p;
|
||||
|
||||
if(state->headers != NULL)
|
||||
nclistfreeall(state->headers);
|
||||
state->headers = nclistnew();
|
||||
state->headset = headset;
|
||||
if(state->response.headers != NULL)
|
||||
nclistfreeall(state->response.headers);
|
||||
state->response.headers = nclistnew();
|
||||
if(state->response.headset != NULL)
|
||||
nclistfreeall(state->response.headset);
|
||||
state->response.headset = nclistnew();
|
||||
for(p=headset;*p;p++)
|
||||
nclistpush(state->response.headset,strdup(*p));
|
||||
|
||||
cstat = CURLERR(curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION, HeaderCallback));
|
||||
if(cstat != CURLE_OK) goto fail;
|
||||
@ -429,12 +577,31 @@ fail:
|
||||
static void
|
||||
headersoff(NC_HTTP_STATE* state)
|
||||
{
|
||||
nclistfreeall(state->headers);
|
||||
state->headers = NULL;
|
||||
nclistfreeall(state->response.headers);
|
||||
state->response.headers = NULL;
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION, NULL));
|
||||
(void)CURLERR(curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, NULL));
|
||||
}
|
||||
|
||||
static int
|
||||
lookupheader(NC_HTTP_STATE* state, const char* key, const char** valuep)
|
||||
{
|
||||
int i;
|
||||
const char* value = NULL;
|
||||
/* Get the content length header */
|
||||
for(i=0;i<nclistlength(state->response.headers);i+=2) {
|
||||
char* s = nclistget(state->response.headers,i);
|
||||
if(strcasecmp(s,key)==0) {
|
||||
value = nclistget(state->response.headers,i+1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(value == NULL) return NC_ENOOBJECT;
|
||||
if(valuep)
|
||||
*valuep = value;
|
||||
return NC_NOERR;
|
||||
}
|
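The reworked HeaderCallback stores the response headers in state->response.headers as a flat NClist of alternating name/value strings, which is why lookupheader() above steps through the list two entries at a time. A minimal standalone sketch of that lookup pattern, using only the NClist calls already present in this file:

    /* Illustrative sketch only: case-insensitive lookup in a {name,value,name,value,...} NClist */
    static const char*
    find_header(NClist* headers, const char* key)
    {
        int i;
        for(i=0;i<nclistlength(headers);i+=2) {
            const char* name = (const char*)nclistget(headers,i);
            if(name != NULL && strcasecmp(name,key)==0)
                return (const char*)nclistget(headers,i+1); /* value paired with this name */
        }
        return NULL; /* not found */
    }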
||||
|
||||
static void
|
||||
showerrors(NC_HTTP_STATE* state)
|
||||
{
|
||||
@ -449,3 +616,71 @@ reporterror(NC_HTTP_STATE* state, CURLcode cstat)
|
||||
cstat,curl_easy_strerror(cstat),state->errbuf);
|
||||
return cstat;
|
||||
}
|
||||
|
||||
static
|
||||
void dump(const char *text, FILE *stream, unsigned char *ptr, size_t size)
|
||||
{
|
||||
size_t i;
|
||||
size_t c;
|
||||
unsigned int width=0x10;
|
||||
|
||||
fprintf(stream, "%s, %10.10ld bytes (0x%8.8lx)\n",
|
||||
text, (long)size, (long)size);
|
||||
|
||||
for(i=0; i<size; i+= width) {
|
||||
fprintf(stream, "%4.4lx: ", (long)i);
|
||||
|
||||
/* show hex to the left */
|
||||
for(c = 0; c < width; c++) {
|
||||
if(i+c < size)
|
||||
fprintf(stream, "%02x ", ptr[i+c]);
|
||||
else
|
||||
fputs(" ", stream);
|
||||
}
|
||||
|
||||
/* show data on the right */
|
||||
for(c = 0; (c < width) && (i+c < size); c++) {
|
||||
char x = (ptr[i+c] >= 0x20 && ptr[i+c] < 0x80) ? ptr[i+c] : '.';
|
||||
fputc(x, stream);
|
||||
}
|
||||
|
||||
fputc('\n', stream); /* newline */
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
my_trace(CURL *handle, curl_infotype type, char *data, size_t size,void *userp)
|
||||
{
|
||||
const char *text;
|
||||
(void)handle; /* prevent compiler warning */
|
||||
(void)userp;
|
||||
|
||||
switch (type) {
|
||||
case CURLINFO_TEXT:
|
||||
fprintf(stderr, "== Info: %s", data);
|
||||
default: /* in case a new one is introduced to shock us */
|
||||
return 0;
|
||||
|
||||
case CURLINFO_HEADER_OUT:
|
||||
text = "=> Send header";
|
||||
break;
|
||||
case CURLINFO_DATA_OUT:
|
||||
text = "=> Send data";
|
||||
break;
|
||||
case CURLINFO_SSL_DATA_OUT:
|
||||
text = "=> Send SSL data";
|
||||
break;
|
||||
case CURLINFO_HEADER_IN:
|
||||
text = "<= Recv header";
|
||||
break;
|
||||
case CURLINFO_DATA_IN:
|
||||
text = "<= Recv data";
|
||||
break;
|
||||
case CURLINFO_SSL_DATA_IN:
|
||||
text = "<= Recv SSL data";
|
||||
break;
|
||||
}
|
||||
|
||||
dump(text, stderr, (unsigned char *)data, size);
|
||||
return 0;
|
||||
}
|
||||
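dump() and my_trace() above follow libcurl's standard debug-callback shape; they only take effect if a handle opts in. A sketch of how such a callback is typically wired up (shown only for context, not part of this change):

    /* Sketch: enable verbose tracing on an existing CURL easy handle */
    curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, my_trace);
    curl_easy_setopt(curl, CURLOPT_DEBUGDATA, NULL);  /* userp argument passed to my_trace */
    curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);      /* required for the callback to fire */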
|
@ -1078,7 +1078,8 @@ openmagic(struct MagicFile* file)
|
||||
/* Construct a URL minus any fragment */
|
||||
file->curlurl = ncuribuild(file->uri,NULL,NULL,NCURISVC);
|
||||
/* Open the curl handle */
|
||||
if((status=nc_http_open(file->curlurl,&file->state,&file->filelen))) goto done;
|
||||
if((status=nc_http_init(&file->state))) goto done;
|
||||
if((status=nc_http_size(file->state,file->curlurl,&file->filelen))) goto done;
|
||||
#endif
|
||||
} else {
|
||||
#ifdef USE_PARALLEL
|
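The openmagic() hunk above reflects the split of the old nc_http_open() into two steps: create a reusable handle, then issue per-object requests that carry the url. A rough sketch of the new call sequence as used here (error handling and cleanup elided; the filelen type is assumed to match file->filelen):

    int status = NC_NOERR;
    NC_HTTP_STATE* state = NULL;
    long long filelen = 0;
    if((status = nc_http_init(&state))) goto done;                   /* create the reusable handle */
    if((status = nc_http_size(state, curlurl, &filelen))) goto done; /* query the object's length */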
||||
|
@ -837,18 +837,27 @@ getwdpath(struct Path* wd)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
char* path = NULL;
|
||||
|
||||
if(wd->path != NULL) return stat;
|
||||
memset(wd,0,sizeof(struct Path));
|
||||
#ifdef _WIN32
|
||||
{
|
||||
#ifdef _WIN32
|
||||
wchar_t* wcwd = NULL;
|
||||
wchar_t* wpath = NULL;
|
||||
wpath = _wgetcwd(NULL,8192);
|
||||
if((stat = wide2utf8(wpath,&path)))
|
||||
{nullfree(wpath); wpath = NULL; return stat;}
|
||||
#else
|
||||
path = getcwd(NULL,8192);
|
||||
#endif
|
||||
wcwd = (wchar_t*)calloc(8192, sizeof(wchar_t));
|
||||
wpath = _wgetcwd(wcwd, 8192);
|
||||
path = NULL;
|
||||
stat = wide2utf8(wpath, &path);
|
||||
free(wcwd);
|
||||
if (stat) return stat;
|
||||
}
|
||||
#else
|
||||
{
|
||||
char cwd[8192];
|
||||
path = getcwd(cwd, sizeof(cwd));
|
||||
path = strdup(path);
|
||||
}
|
||||
#endif
|
||||
stat = parsepath(path,wd);
|
||||
/* Force the kind */
|
||||
wd->kind = getlocalpathkind();
|
||||
|
@ -13,14 +13,20 @@ See COPYRIGHT for license information.
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
|
||||
#include "netcdf.h"
|
||||
#include "ncbytes.h"
|
||||
#include "ncuri.h"
|
||||
#include "ncrc.h"
|
||||
#include "nclog.h"
|
||||
#include "ncauth.h"
|
||||
#include "ncpathmgr.h"
|
||||
|
||||
#undef DRCDEBUG
|
||||
#undef LEXDEBUG
|
||||
#undef PARSEDEBUG
|
||||
#undef AWSDEBUG
|
||||
|
||||
#define RTAG ']'
|
||||
#define LTAG '['
|
||||
@ -30,21 +36,32 @@ See COPYRIGHT for license information.
|
||||
#undef MEMCHECK
|
||||
#define MEMCHECK(x) if((x)==NULL) {goto nomem;} else {}
|
||||
|
||||
/* Alternate .aws directory location */
|
||||
#define NC_TEST_AWS_DIR "NC_TEST_AWS_DIR"
|
||||
|
||||
/* Forward */
|
||||
static int NC_rcload(void);
|
||||
static char* rcreadline(char** nextlinep);
|
||||
static void rctrim(char* text);
|
||||
static void rcorder(NClist* rc);
|
||||
static int rccompile(const char* path);
|
||||
static struct NCTriple* rclocate(const char* key, const char* hostport);
|
||||
static struct NCRCentry* rclocate(const char* key, const char* hostport, const char* path);
|
||||
static int rcsearch(const char* prefix, const char* rcname, char** pathp);
|
||||
static void rcfreetriples(NClist* rc);
|
||||
#ifdef D4DEBUG
|
||||
static void storedump(char* msg, NClist* triples);
|
||||
static void rcfreeentries(NClist* rc);
|
||||
static void clearS3credentials(struct S3credentials* creds);
|
||||
#ifdef DRCDEBUG
|
||||
static void storedump(char* msg, NClist* entrys);
|
||||
#endif
|
||||
static int aws_load_credentials(NCRCglobalstate*);
|
||||
static void freeprofile(struct AWSprofile* profile);
|
||||
static void freeprofilelist(NClist* profiles);
|
||||
|
||||
/* Define default rc files and aliases, also defines load order*/
|
||||
static const char* rcfilenames[] = {".ncrc", ".daprc", ".dodsrc",NULL};
|
||||
|
||||
/* Read these files */
|
||||
static const char* awsconfigfiles[] = {".aws/credentials",".aws/config",NULL};
|
||||
|
||||
/**************************************************/
|
||||
/* External Entry Points */
|
||||
|
||||
@ -52,13 +69,12 @@ static NCRCglobalstate* ncrc_globalstate = NULL;
|
||||
|
||||
static int NCRCinitialized = 0;
|
||||
|
||||
/* Initialize defaults */
|
||||
void
|
||||
ncrc_initialize(void)
|
||||
int
|
||||
ncrc_createglobalstate(void)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
const char* tmp = NULL;
|
||||
|
||||
if(NCRCinitialized) return;
|
||||
if(ncrc_globalstate == NULL) {
|
||||
ncrc_globalstate = calloc(1,sizeof(NCRCglobalstate));
|
||||
}
|
||||
@ -68,15 +84,47 @@ ncrc_initialize(void)
|
||||
tmp = getenv(NCRCENVRC);
|
||||
if(tmp != NULL && strlen(tmp) > 0)
|
||||
ncrc_globalstate->rcinfo.rcfile = strdup(tmp);
|
||||
NCRCinitialized = 1;
|
||||
return stat;
|
||||
}
|
||||
|
||||
/*
|
||||
Initialize defaults and load:
|
||||
* .ncrc
|
||||
* .daprc
|
||||
* .dodsrc
|
||||
* ${HOME}/.aws/config
|
||||
* ${HOME}/.aws/credentials
|
||||
|
||||
For debugging support, it is possible
|
||||
to change where the code looks for the .aws directory.
|
||||
This is set by the environment variable NC_TEST_AWS_DIR.
|
||||
|
||||
*/
|
||||
|
||||
void
|
||||
ncrc_initialize(void)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
|
||||
if(NCRCinitialized) return;
|
||||
NCRCinitialized = 1; /* prevent recursion */
|
||||
|
||||
/* Load entrys */
|
||||
if((stat = NC_rcload())) {
|
||||
nclog(NCLOGWARN,".rc loading failed");
|
||||
}
|
||||
/* Load .aws/config */
|
||||
if((stat = aws_load_credentials(ncrc_globalstate))) {
|
||||
nclog(NCLOGWARN,"AWS config file not loaded");
|
||||
}
|
||||
}
|
||||
|
||||
/* Get global state */
|
||||
NCRCglobalstate*
|
||||
ncrc_getglobalstate(void)
|
||||
{
|
||||
if(!NCRCinitialized)
|
||||
ncrc_initialize();
|
||||
if(ncrc_globalstate == NULL)
|
||||
ncrc_createglobalstate();
|
||||
return ncrc_globalstate;
|
||||
}
|
||||
|
||||
@ -88,6 +136,7 @@ ncrc_freeglobalstate(void)
|
||||
nullfree(ncrc_globalstate->home);
|
||||
nullfree(ncrc_globalstate->cwd);
|
||||
NC_rcclear(&ncrc_globalstate->rcinfo);
|
||||
clearS3credentials(&ncrc_globalstate->s3creds);
|
||||
free(ncrc_globalstate);
|
||||
ncrc_globalstate = NULL;
|
||||
}
|
||||
@ -98,15 +147,15 @@ NC_rcclear(NCRCinfo* info)
|
||||
{
|
||||
if(info == NULL) return;
|
||||
nullfree(info->rcfile);
|
||||
rcfreetriples(info->triples);
|
||||
rcfreeentries(info->entries);
|
||||
}
|
||||
|
||||
void
|
||||
rcfreetriples(NClist* rc)
|
||||
rcfreeentries(NClist* rc)
|
||||
{
|
||||
int i;
|
||||
for(i=0;i<nclistlength(rc);i++) {
|
||||
NCTriple* t = (NCTriple*)nclistget(rc,i);
|
||||
NCRCentry* t = (NCRCentry*)nclistget(rc,i);
|
||||
nullfree(t->host);
|
||||
nullfree(t->key);
|
||||
nullfree(t->value);
|
||||
@ -116,7 +165,7 @@ rcfreetriples(NClist* rc)
|
||||
}
|
||||
|
||||
/* locate, read and compile the rc files, if any */
|
||||
int
|
||||
static int
|
||||
NC_rcload(void)
|
||||
{
|
||||
int i,ret = NC_NOERR;
|
||||
@ -142,7 +191,7 @@ NC_rcload(void)
|
||||
4. $CWD/.ncrc
|
||||
5. $CWD/.daprc
|
||||
6. $CWD/.docsrc
|
||||
Entries in later files override any of the earlier files
|
||||
Entries in later files override entries in earlier files
|
||||
*/
|
||||
if(globalstate->rcinfo.rcfile != NULL) { /* always use this */
|
||||
nclistpush(rcfileorder,strdup(globalstate->rcinfo.rcfile));
|
||||
@ -180,16 +229,32 @@ done:
|
||||
}
|
||||
|
||||
/**
|
||||
* Locate a triple by property key and host+port (may be null|"")
|
||||
* Locate an entry by property key and host+port (may be null|"")
|
||||
* If duplicate keys, first takes precedence.
|
||||
*/
|
||||
char*
|
||||
NC_rclookup(const char* key, const char* hostport)
|
||||
NC_rclookup(const char* key, const char* hostport, const char* path)
|
||||
{
|
||||
struct NCTriple* triple = NULL;
|
||||
struct NCRCentry* entry = NULL;
|
||||
if(!NCRCinitialized) ncrc_initialize();
|
||||
triple = rclocate(key,hostport);
|
||||
return (triple == NULL ? NULL : triple->value);
|
||||
entry = rclocate(key,hostport,path);
|
||||
return (entry == NULL ? NULL : entry->value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Locate an entry by property key and uri.
|
||||
* If duplicate keys, first takes precedence.
|
||||
*/
|
||||
char*
|
||||
NC_rclookupx(NCURI* uri, const char* key)
|
||||
{
|
||||
char* hostport = NULL;
|
||||
char* result = NULL;
|
||||
|
||||
hostport = NC_combinehostport(uri);
|
||||
result = NC_rclookup(key,hostport,uri->path);
|
||||
nullfree(hostport);
|
||||
return result;
|
||||
}
|
||||
|
||||
#if 0
|
||||
@ -224,7 +289,7 @@ NC_set_rcfile(const char* rcfile)
|
||||
globalstate->rcinfo.rcfile = strdup(rcfile);
|
||||
/* Clear globalstate->rcinfo */
|
||||
NC_rcclear(&globalstate->rcinfo);
|
||||
/* (re) load the rcfile and esp the triplestore*/
|
||||
/* (re)load the rcfile and especially the entry store*/
|
||||
stat = NC_rcload();
|
||||
done:
|
||||
return stat;
|
||||
@ -278,7 +343,7 @@ rctrim(char* text)
|
||||
}
|
||||
}
|
||||
|
||||
/* Order the triples: those with urls must be first,
|
||||
/* Order the entries: those with urls must be first,
|
||||
but otherwise relative order does not matter.
|
||||
*/
|
||||
static void
|
||||
@ -291,29 +356,29 @@ rcorder(NClist* rc)
|
||||
tmprc = nclistnew();
|
||||
/* Copy rc into tmprc and clear rc */
|
||||
for(i=0;i<len;i++) {
|
||||
NCTriple* ti = nclistget(rc,i);
|
||||
NCRCentry* ti = nclistget(rc,i);
|
||||
nclistpush(tmprc,ti);
|
||||
}
|
||||
nclistclear(rc);
|
||||
/* Two passes: 1) pull triples with host */
|
||||
/* Two passes: 1) pull entries with host */
|
||||
for(i=0;i<len;i++) {
|
||||
NCTriple* ti = nclistget(tmprc,i);
|
||||
NCRCentry* ti = nclistget(tmprc,i);
|
||||
if(ti->host == NULL) continue;
|
||||
nclistpush(rc,ti);
|
||||
}
|
||||
/* pass 2 pull triples without host*/
|
||||
/* pass 2 pull entries without host*/
|
||||
for(i=0;i<len;i++) {
|
||||
NCTriple* ti = nclistget(tmprc,i);
|
||||
NCRCentry* ti = nclistget(tmprc,i);
|
||||
if(ti->host != NULL) continue;
|
||||
nclistpush(rc,ti);
|
||||
}
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DRCDEBUG
|
||||
storedump("reorder:",rc);
|
||||
#endif
|
||||
nclistfree(tmprc);
|
||||
}
|
||||
|
||||
/* Merge a triple store from a file*/
|
||||
/* Merge an entry store from a file*/
|
||||
static int
|
||||
rccompile(const char* path)
|
||||
{
|
||||
@ -324,6 +389,7 @@ rccompile(const char* path)
|
||||
NCURI* uri = NULL;
|
||||
char* nextline = NULL;
|
||||
NCRCglobalstate* globalstate = ncrc_getglobalstate();
|
||||
char* bucket = NULL;
|
||||
|
||||
if((ret=NC_readfile(path,tmp))) {
|
||||
nclog(NCLOGWARN, "Could not open configuration file: %s",path);
|
||||
@ -332,10 +398,10 @@ rccompile(const char* path)
|
||||
contents = ncbytesextract(tmp);
|
||||
if(contents == NULL) contents = strdup("");
|
||||
/* Either reuse or create new */
|
||||
rc = globalstate->rcinfo.triples;
|
||||
rc = globalstate->rcinfo.entries;
|
||||
if(rc == NULL) {
|
||||
rc = nclistnew();
|
||||
globalstate->rcinfo.triples = rc;
|
||||
globalstate->rcinfo.entries = rc;
|
||||
}
|
||||
nextline = contents;
|
||||
for(;;) {
|
||||
@ -344,7 +410,7 @@ rccompile(const char* path)
|
||||
char* value = NULL;
|
||||
char* host = NULL;
|
||||
size_t llen;
|
||||
NCTriple* triple;
|
||||
NCRCentry* entry;
|
||||
|
||||
line = rcreadline(&nextline);
|
||||
if(line == NULL) break; /* done */
|
||||
@ -360,12 +426,20 @@ rccompile(const char* path)
|
||||
}
|
||||
line = rtag + 1;
|
||||
*rtag = '\0';
|
||||
/* compile the url and pull out the host */
|
||||
/* compile the url and pull out the host and protocol */
|
||||
if(uri) ncurifree(uri);
|
||||
if(ncuriparse(url,&uri)) {
|
||||
nclog(NCLOGERR, "Malformed [url] in %s entry: %s",path,line);
|
||||
continue;
|
||||
}
|
||||
{ NCURI* newuri = NULL;
|
||||
/* Rebuild the url to path format */
|
||||
nullfree(bucket);
|
||||
if((ret = NC_s3urlrebuild(uri,&newuri,&bucket,NULL))) goto done;
|
||||
ncurifree(uri);
|
||||
uri = newuri;
|
||||
newuri = NULL;
|
||||
}
|
||||
ncbytesclear(tmp);
|
||||
ncbytescat(tmp,uri->host);
|
||||
if(uri->port != NULL) {
|
||||
@ -387,30 +461,30 @@ rccompile(const char* path)
|
||||
value++;
|
||||
}
|
||||
/* See if key already exists */
|
||||
triple = rclocate(key,host);
|
||||
if(triple != NULL) {
|
||||
nullfree(triple->host);
|
||||
nullfree(triple->key);
|
||||
nullfree(triple->value);
|
||||
entry = rclocate(key,host,path);
|
||||
if(entry != NULL) {
|
||||
nullfree(entry->host);
|
||||
nullfree(entry->key);
|
||||
nullfree(entry->value);
|
||||
} else {
|
||||
triple = (NCTriple*)calloc(1,sizeof(NCTriple));
|
||||
if(triple == NULL) {ret = NC_ENOMEM; goto done;}
|
||||
nclistpush(rc,triple);
|
||||
entry = (NCRCentry*)calloc(1,sizeof(NCRCentry));
|
||||
if(entry == NULL) {ret = NC_ENOMEM; goto done;}
|
||||
nclistpush(rc,entry);
|
||||
}
|
||||
triple->host = host; host = NULL;
|
||||
triple->key = nulldup(key);
|
||||
triple->value = nulldup(value);
|
||||
rctrim(triple->host);
|
||||
rctrim(triple->key);
|
||||
rctrim(triple->value);
|
||||
entry->host = host; host = NULL;
|
||||
entry->key = nulldup(key);
|
||||
entry->value = nulldup(value);
|
||||
rctrim(entry->host);
|
||||
rctrim(entry->key);
|
||||
rctrim(entry->value);
|
||||
|
||||
#ifdef DRCDEBUG
|
||||
fprintf(stderr,"rc: host=%s key=%s value=%s\n",
|
||||
(triple->host != NULL ? triple->host : "<null>"),
|
||||
triple->key,triple->value);
|
||||
(entry->host != NULL ? entry->host : "<null>"),
|
||||
entry->key,entry->value);
|
||||
#endif
|
||||
|
||||
triple = NULL;
|
||||
entry = NULL;
|
||||
}
|
||||
rcorder(rc);
|
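With this change an .rc entry may be qualified not just by host+port but also by a path prefix taken from the [url] part, and rclocate() matches that prefix at lookup time. A hypothetical .ncrc fragment illustrating the format (keys and values invented for illustration):

    # applies everywhere
    HTTP.SSL.VALIDATE=1
    # applies only to this host and to keys under /my-bucket
    [https://s3.us-east-1.amazonaws.com/my-bucket]AWS.PROFILE=unidata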
||||
|
||||
@ -422,16 +496,16 @@ done:
|
||||
}
|
||||
|
||||
/**
|
||||
* (Internal) Locate a triple by property key and host+port (may be null or "").
|
||||
* (Internal) Locate an entry by property key and host+port (may be null or "").
|
||||
* If duplicate keys, first takes precedence.
|
||||
*/
|
||||
static struct NCTriple*
|
||||
rclocate(const char* key, const char* hostport)
|
||||
static struct NCRCentry*
|
||||
rclocate(const char* key, const char* hostport, const char* path)
|
||||
{
|
||||
int i,found;
|
||||
NCRCglobalstate* globalstate = ncrc_getglobalstate();
|
||||
NClist* rc = globalstate->rcinfo.triples;
|
||||
NCTriple* triple = NULL;
|
||||
NClist* rc = globalstate->rcinfo.entries;
|
||||
NCRCentry* entry = NULL;
|
||||
|
||||
if(globalstate->rcinfo.ignore)
|
||||
return NULL;
|
||||
@ -442,21 +516,26 @@ rclocate(const char* key, const char* hostport)
|
||||
for(found=0,i=0;i<nclistlength(rc);i++) {
|
||||
int t;
|
||||
size_t hplen;
|
||||
triple = (NCTriple*)nclistget(rc,i);
|
||||
entry = (NCRCentry*)nclistget(rc,i);
|
||||
|
||||
hplen = (triple->host == NULL ? 0 : strlen(triple->host));
|
||||
hplen = (entry->host == NULL ? 0 : strlen(entry->host));
|
||||
|
||||
if(strcmp(key,triple->key) != 0) continue; /* keys do not match */
|
||||
/* If the triple entry has no url, then use it
|
||||
(because we have checked all other cases)*/
|
||||
if(hplen == 0) {found=1;break;}
|
||||
/* do hostport match */
|
||||
t = 0;
|
||||
if(triple->host != NULL)
|
||||
t = strcmp(hostport,triple->host);
|
||||
if(t == 0) {found=1; break;}
|
||||
if(strcmp(key,entry->key) != 0) continue; /* keys do not match */
|
||||
/* If the entry has no url, then use it
|
||||
(because we have checked all other cases)*/
|
||||
if(hplen == 0) {found=1;break;}
|
||||
/* do hostport match */
|
||||
t = 0;
|
||||
if(entry->host != NULL)
|
||||
t = strcmp(hostport,entry->host);
|
||||
/* do path prefix match */
|
||||
if(entry->path != NULL) {
|
||||
size_t pathlen = strlen(entry->path);
|
||||
t = strncmp(path,entry->path,pathlen);
|
||||
}
|
||||
if(t == 0) {found=1; break;}
|
||||
}
|
||||
return (found?triple:NULL);
|
||||
return (found?entry:NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -500,72 +579,481 @@ done:
|
||||
}
|
||||
|
||||
int
|
||||
NC_rcfile_insert(const char* key, const char* value, const char* hostport)
|
||||
NC_rcfile_insert(const char* key, const char* value, const char* hostport, const char* path)
|
||||
{
|
||||
int ret = NC_NOERR;
|
||||
/* See if this key already defined */
|
||||
struct NCTriple* triple = NULL;
|
||||
struct NCRCentry* entry = NULL;
|
||||
NCRCglobalstate* globalstate = NULL;
|
||||
NClist* rc = NULL;
|
||||
|
||||
if(!NCRCinitialized) ncrc_initialize();
|
||||
globalstate = ncrc_getglobalstate();
|
||||
rc = globalstate->rcinfo.triples;
|
||||
rc = globalstate->rcinfo.entries;
|
||||
|
||||
if(rc == NULL) {
|
||||
rc = nclistnew();
|
||||
if(rc == NULL) {ret = NC_ENOMEM; goto done;}
|
||||
}
|
||||
triple = rclocate(key,hostport);
|
||||
if(triple == NULL) {
|
||||
triple = (NCTriple*)calloc(1,sizeof(NCTriple));
|
||||
if(triple == NULL) {ret = NC_ENOMEM; goto done;}
|
||||
triple->key = strdup(key);
|
||||
triple->value = NULL;
|
||||
rctrim(triple->key);
|
||||
triple->host = (hostport == NULL ? NULL : strdup(hostport));
|
||||
nclistpush(rc,triple);
|
||||
entry = rclocate(key,hostport,path);
|
||||
if(entry == NULL) {
|
||||
entry = (NCRCentry*)calloc(1,sizeof(NCRCentry));
|
||||
if(entry == NULL) {ret = NC_ENOMEM; goto done;}
|
||||
entry->key = strdup(key);
|
||||
entry->value = NULL;
|
||||
rctrim(entry->key);
|
||||
entry->host = (hostport == NULL ? NULL : strdup(hostport));
|
||||
nclistpush(rc,entry);
|
||||
}
|
||||
if(triple->value != NULL) free(triple->value);
|
||||
triple->value = strdup(value);
|
||||
rctrim(triple->value);
|
||||
if(entry->value != NULL) free(entry->value);
|
||||
entry->value = strdup(value);
|
||||
rctrim(entry->value);
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Obtain the count of number of triples */
|
||||
/* Obtain the number of entries */
|
||||
size_t
|
||||
NC_rcfile_length(NCRCinfo* info)
|
||||
{
|
||||
return nclistlength(info->triples);
|
||||
return nclistlength(info->entries);
|
||||
}
|
||||
|
||||
/* Obtain the ith triple; return NULL if out of range */
|
||||
NCTriple*
|
||||
/* Obtain the ith entry; return NULL if out of range */
|
||||
NCRCentry*
|
||||
NC_rcfile_ith(NCRCinfo* info, size_t i)
|
||||
{
|
||||
if(i >= nclistlength(info->triples))
|
||||
if(i >= nclistlength(info->entries))
|
||||
return NULL;
|
||||
return (NCTriple*)nclistget(info->triples,i);
|
||||
return (NCRCentry*)nclistget(info->entries,i);
|
||||
}
|
||||
|
||||
|
||||
#ifdef D4DEBUG
|
||||
#ifdef DRCDEBUG
|
||||
static void
|
||||
storedump(char* msg, NClist* triples)
|
||||
storedump(char* msg, NClist* entries)
|
||||
{
|
||||
int i;
|
||||
|
||||
if(msg != NULL) fprintf(stderr,"%s\n",msg);
|
||||
if(triples == NULL || nclistlength(triples)==0) {
|
||||
if(entries == NULL || nclistlength(entries)==0) {
|
||||
fprintf(stderr,"<EMPTY>\n");
|
||||
return;
|
||||
}
|
||||
for(i=0;i<nclistlength(triples);i++) {
|
||||
NCTriple* t = (NCTriple*)nclistget(triples,i);
|
||||
for(i=0;i<nclistlength(entries);i++) {
|
||||
NCRCentry* t = (NCRCentry*)nclistget(entries,i);
|
||||
fprintf(stderr,"\t%s\t%s\t%s\n",
|
||||
((t->host == NULL || strlen(t->host)==0)?"--":t->host),t->key,t->value);
|
||||
}
|
||||
fflush(stderr);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**************************************************/
|
||||
/*
|
||||
Get the current active profile. The priority order is as follows:
|
||||
1. aws.profile key in mode flags
|
||||
2. aws.profile in .rc entries
|
||||
3. "default"
|
||||
|
||||
@param uri uri with mode flags, may be NULL
|
||||
@param profilep return profile name here or NULL if none found
|
||||
@return NC_NOERR if no error.
|
||||
@return NC_EINVAL if something else went wrong.
|
||||
*/
|
||||
|
||||
int
|
||||
NC_getactives3profile(NCURI* uri, const char** profilep)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
const char* profile = NULL;
|
||||
|
||||
profile = ncurifragmentlookup(uri,"aws.profile");
|
||||
if(profile == NULL)
|
||||
profile = NC_rclookupx(uri,"AWS.PROFILE");
|
||||
if(profile == NULL)
|
||||
profile = "default";
|
||||
#ifdef AWSDEBUG
|
||||
fprintf(stderr,">>> activeprofile = %s\n",(profile?profile:"null"));
|
||||
#endif
|
||||
if(profilep) *profilep = profile;
|
||||
return stat;
|
||||
}
|
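A short caller-side sketch of the precedence above, assuming the url carries an aws.profile fragment key (url and profile name invented for illustration):

    NCURI* uri = NULL;
    const char* profile = NULL;
    if(!ncuriparse("s3://my-bucket/key#mode=nczarr,s3&aws.profile=unidata",&uri)) {
        if(NC_getactives3profile(uri,&profile) == NC_NOERR)
            fprintf(stderr,"active profile: %s\n",profile); /* "unidata": the fragment wins over .rc and "default" */
        ncurifree(uri);
    }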
||||
|
||||
/*
|
||||
Get the current default region. The search order is as follows:
|
||||
1. aws.region key in mode flags
|
||||
2. aws.region in .rc entries
|
||||
3. aws_region key in current profile (only if profiles are being used)
|
||||
4. "us-east-1"
|
||||
|
||||
@param uri uri with mode flags, may be NULL
|
||||
@param regionp return region name here or NULL if none found
|
||||
@return NC_NOERR if no error.
|
||||
@return NC_EINVAL if something else went wrong.
|
||||
*/
|
||||
|
||||
int
|
||||
NC_getdefaults3region(NCURI* uri, const char** regionp)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
const char* region = NULL;
|
||||
const char* profile = NULL;
|
||||
|
||||
region = ncurifragmentlookup(uri,"aws.region");
|
||||
if(region == NULL)
|
||||
region = NC_rclookupx(uri,"AWS.REGION");
|
||||
if(region == NULL) {/* See if we can find a profile */
|
||||
if((stat = NC_getactives3profile(uri,&profile))==NC_NOERR) {
|
||||
if(profile)
|
||||
(void)NC_s3profilelookup(profile,"aws_region",®ion);
|
||||
}
|
||||
}
|
||||
if(region == NULL)
|
||||
region = "us-east-1";
|
||||
#ifdef AWSDEBUG
|
||||
fprintf(stderr,">>> activeregion = %s\n",(region?region:"null"));
|
||||
#endif
|
||||
if(regionp) *regionp = region;
|
||||
return stat;
|
||||
}
|
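The same pattern applies to the region; this is what makes bare "s3://bucket/key" urls usable, since they carry no region in the host. Sketch (uri parsed as in the previous example):

    const char* region = NULL;
    (void)NC_getdefaults3region(uri,&region);
    /* region now holds aws.region from the fragment, AWS.REGION from the .rc file,
       aws_region from the active profile, or the "us-east-1" fallback */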
||||
|
||||
static void
|
||||
clearS3credentials(struct S3credentials* creds)
|
||||
{
|
||||
if(creds)
|
||||
freeprofilelist(creds->profiles);
|
||||
}
|
||||
|
||||
/**
|
||||
Parser for aws credentials.
|
||||
|
||||
Grammar:
|
||||
|
||||
credsfile: profilelist ;
|
||||
profilelist: profile | profilelist profile ;
|
||||
profile: '[' profilename ']'
|
||||
entries ;
|
||||
entries: empty | entries entry ;
|
||||
entry: WORD = WORD ;
|
||||
profilename: WORD ;
|
||||
Lexical:
|
||||
WORD sequence of printable characters - [ \[\]=]+
|
||||
*/
|
||||
|
||||
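The grammar above matches the usual ~/.aws/credentials layout. A file the parser accepts looks like this (profile names and values invented):

    [default]
    aws_access_key_id = AKIAXXXXXXXXXXEXAMPLE
    aws_secret_access_key = wJalrXUtnFEMIexamplesecretkey
    aws_region = us-east-1

    [unidata]
    aws_access_key_id = AKIAYYYYYYYYYYEXAMPLE
    aws_secret_access_key = anotherexamplesecretkey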
#define AWS_EOF (-1)
|
||||
#define AWS_ERR (0)
|
||||
#define AWS_WORD (1)
|
||||
|
||||
#ifdef LEXDEBUG
|
||||
static const char*
|
||||
tokenname(int token)
|
||||
{
|
||||
static char num[32];
|
||||
switch(token) {
|
||||
case AWS_EOF: return "EOF";
|
||||
case AWS_ERR: return "ERR";
|
||||
case AWS_WORD: return "WORD";
|
||||
default: snprintf(num,sizeof(num),"%d",token); return num;
|
||||
}
|
||||
return "UNKNOWN";
|
||||
}
|
||||
#endif
|
||||
|
||||
typedef struct AWSparser {
|
||||
char* text;
|
||||
char* pos;
|
||||
size_t yylen; /* |yytext| */
|
||||
NCbytes* yytext;
|
||||
int token; /* last token found */
|
||||
} AWSparser;
|
||||
|
||||
static int
|
||||
awslex(AWSparser* parser)
|
||||
{
|
||||
int c;
|
||||
int token = 0;
|
||||
char* start;
|
||||
size_t count;
|
||||
|
||||
ncbytesclear(parser->yytext);
|
||||
parser->token = AWS_ERR;
|
||||
|
||||
while(token == 0) { /* avoid need to goto when retrying */
|
||||
c = *parser->pos;
|
||||
if(c == '\0') {
|
||||
token = AWS_EOF;
|
||||
} else if(c <= ' ' || c == '\177') {
|
||||
parser->pos++;
|
||||
continue; /* ignore whitespace */
|
||||
} else if(c == '[' || c == ']' || c == '=') {
|
||||
ncbytesclear(parser->yytext);
|
||||
ncbytesappend(parser->yytext,c);
|
||||
token = c;
|
||||
parser->pos++;
|
||||
} else { /*Assume a word*/
|
||||
start = parser->pos;
|
||||
for(;;) {
|
||||
c = *parser->pos++;
|
||||
if(c <= ' ' || c == '\177' || c == '[' || c == ']' || c == '=') break; /* end of word */
|
||||
}
|
||||
/* Pushback last char */
|
||||
parser->pos--;
|
||||
count = ((parser->pos) - start);
|
||||
ncbytesappendn(parser->yytext,start,count);
|
||||
ncbytesnull(parser->yytext);
|
||||
token = AWS_WORD;
|
||||
}
|
||||
#ifdef LEXDEBUG
|
||||
fprintf(stderr,"%s(%d): |%s|\n",tokenname(token),token,ncbytescontents(parser->yytext));
|
||||
#endif
|
||||
} /*for(;;)*/
|
||||
|
||||
parser->token = token;
|
||||
return token;
|
||||
}
|
||||
|
||||
/*
|
||||
@param text of the aws credentials file
|
||||
@param profiles list of form struct AWSprofile (see ncauth.h)
|
||||
*/
|
||||
|
||||
#define LBR '['
|
||||
#define RBR ']'
|
||||
|
||||
static int
|
||||
awsparse(const char* text, NClist* profiles)
|
||||
{
|
||||
int i,stat = NC_NOERR;
|
||||
size_t len;
|
||||
AWSparser* parser = NULL;
|
||||
struct AWSprofile* profile = NULL;
|
||||
int token;
|
||||
char* key = NULL;
|
||||
char* value = NULL;
|
||||
|
||||
if(text == NULL) text = "";
|
||||
|
||||
parser = calloc(1,sizeof(AWSparser));
|
||||
if(parser == NULL)
|
||||
{stat = (NC_ENOMEM); goto done;}
|
||||
len = strlen(text);
|
||||
parser->text = (char*)malloc(len+1+1);
|
||||
if(parser->text == NULL)
|
||||
{stat = (NC_ENOMEM); goto done;}
|
||||
strcpy(parser->text,text);
|
||||
/* Double nul terminate */
|
||||
parser->text[len] = '\0';
|
||||
parser->text[len+1] = '\0';
|
||||
parser->pos = &parser->text[0];
|
||||
parser->yytext = ncbytesnew();
|
||||
|
||||
/* Do not need recursion, use simple loops */
|
||||
token = awslex(parser); /* make token always be defined */
|
||||
for(;;) {
|
||||
if(token == AWS_EOF) break; /* finished */
|
||||
if(token != LBR) {stat = NC_EINVAL; goto done;}
|
||||
token = awslex(parser);
|
||||
if(token != AWS_WORD) {stat = NC_EINVAL; goto done;}
|
||||
assert(profile == NULL);
|
||||
if((profile = (struct AWSprofile*)calloc(1,sizeof(struct AWSprofile)))==NULL)
|
||||
{stat = NC_ENOMEM; goto done;}
|
||||
profile->name = ncbytesextract(parser->yytext);
|
||||
profile->entries = nclistnew();
|
||||
token = awslex(parser);
|
||||
if(token != RBR) {stat = NC_EINVAL; goto done;}
|
||||
#ifdef PARSEDEBUG
|
||||
fprintf(stderr,">>> parse: profile=%s\n",profile->name);
|
||||
#endif
|
||||
/* The fields can be in any order */
|
||||
for(;;) {
|
||||
struct AWSentry* entry = NULL;
|
||||
token = awslex(parser); /* prime parser */
|
||||
if(token == AWS_EOF || token == LBR)
|
||||
break;
|
||||
if(token != AWS_WORD) {stat = NC_EINVAL; goto done;}
|
||||
key = ncbytesextract(parser->yytext);
|
||||
token = awslex(parser);
|
||||
if(token != '=') {stat = NC_EINVAL; goto done;}
|
||||
token = awslex(parser);
|
||||
if(token != AWS_WORD) {stat = NC_EINVAL; goto done;}
|
||||
value = ncbytesextract(parser->yytext);
|
||||
if((entry = (struct AWSentry*)calloc(1,sizeof(struct AWSentry)))==NULL)
|
||||
{stat = NC_ENOMEM; goto done;}
|
||||
entry->key = key; key = NULL;
|
||||
entry->value = value; value = NULL;
|
||||
#ifdef PARSEDEBUG
|
||||
fprintf(stderr,">>> parse: entry=(%s,%s)\n",entry->key,entry->value);
|
||||
#endif
|
||||
nclistpush(profile->entries,entry); entry = NULL;
|
||||
}
|
||||
|
||||
/* If this profile already exists, then ignore new one */
|
||||
for(i=0;i<nclistlength(profiles);i++) {
|
||||
struct AWSprofile* p = (struct AWSprofile*)nclistget(profiles,i);
|
||||
if(strcasecmp(p->name,profile->name)==0) {
|
||||
/* reclaim and ignore */
|
||||
freeprofile(profile);
|
||||
profile = NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(profile) nclistpush(profiles,profile);
|
||||
profile = NULL;
|
||||
}
|
||||
|
||||
done:
|
||||
if(profile) freeprofile(profile);
|
||||
nullfree(key);
|
||||
nullfree(value);
|
||||
if(parser != NULL) {
|
||||
nullfree(parser->text);
|
||||
ncbytesfree(parser->yytext);
|
||||
free(parser);
|
||||
}
|
||||
return (stat);
|
||||
}
|
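Within this file the parser is driven on the text of each credentials/config file; a sketch with an inline credentials string (values invented):

    NClist* profiles = nclistnew();
    if(awsparse("[default]\naws_access_key_id=XXXX\naws_secret_access_key=YYYY\n",profiles) == NC_NOERR) {
        struct AWSprofile* p = (struct AWSprofile*)nclistget(profiles,0);
        /* p->name is "default"; p->entries holds two AWSentry key/value pairs */
    }
    freeprofilelist(profiles);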
||||
|
||||
static void
|
||||
freeentry(struct AWSentry* e)
|
||||
{
|
||||
if(e) {
|
||||
#ifdef AWSDEBUG
|
||||
fprintf(stderr,">>> freeentry: key=%s value=%s\n",e->key,e->value);
|
||||
#endif
|
||||
nullfree(e->key);
|
||||
nullfree(e->value);
|
||||
nullfree(e);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
freeprofile(struct AWSprofile* profile)
|
||||
{
|
||||
if(profile) {
|
||||
int i;
|
||||
#ifdef AWSDEBUG
|
||||
fprintf(stderr,">>> freeprofile: %s\n",profile->name);
|
||||
#endif
|
||||
for(i=0;i<nclistlength(profile->entries);i++) {
|
||||
struct AWSentry* e = (struct AWSentry*)nclistget(profile->entries,i);
|
||||
freeentry(e);
|
||||
}
|
||||
nclistfree(profile->entries);
|
||||
nullfree(profile->name);
|
||||
nullfree(profile);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
freeprofilelist(NClist* profiles)
|
||||
{
|
||||
if(profiles) {
|
||||
int i;
|
||||
for(i=0;i<nclistlength(profiles);i++) {
|
||||
struct AWSprofile* p = (struct AWSprofile*)nclistget(profiles,i);
|
||||
freeprofile(p);
|
||||
}
|
||||
nclistfree(profiles);
|
||||
}
|
||||
}
|
||||
|
||||
/* Find, load, and parse the aws credentials file */
|
||||
static int
|
||||
aws_load_credentials(NCRCglobalstate* gstate)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
struct S3credentials* creds = &gstate->s3creds;
|
||||
NClist* profiles = nclistnew();
|
||||
const char** awscfg = awsconfigfiles;
|
||||
const char* aws_root = getenv(NC_TEST_AWS_DIR);
|
||||
NCbytes* buf = ncbytesnew();
|
||||
char path[8192];
|
||||
|
||||
for(;*awscfg;awscfg++) {
|
||||
/* Construct the path ${HOME}/<file> or Windows equivalent. */
|
||||
const char* cfg = *awscfg;
|
||||
|
||||
snprintf(path,sizeof(path),"%s%s%s",
|
||||
(aws_root?aws_root:gstate->home),
|
||||
(*cfg == '/'?"":"/"),
|
||||
cfg);
|
||||
ncbytesclear(buf);
|
||||
if((stat=NC_readfile(path,buf))) {
|
||||
nclog(NCLOGWARN, "Could not open file: %s",path);
|
||||
} else {
|
||||
/* Parse the credentials file */
|
||||
const char* text = ncbytescontents(buf);
|
||||
if((stat = awsparse(text,profiles))) goto done;
|
||||
}
|
||||
}
|
||||
|
||||
/* add a "none" credentials */
|
||||
{
|
||||
struct AWSprofile* noprof = (struct AWSprofile*)calloc(1,sizeof(struct AWSprofile));
|
||||
noprof->name = strdup("none");
|
||||
noprof->entries = nclistnew();
|
||||
nclistpush(profiles,noprof); noprof = NULL;
|
||||
}
|
||||
|
||||
creds->profiles = profiles; profiles = NULL;
|
||||
|
||||
#ifdef AWSDEBUG
|
||||
{int i,j;
|
||||
fprintf(stderr,">>> profiles:\n");
|
||||
for(i=0;i<nclistlength(creds->profiles);i++) {
|
||||
struct AWSprofile* p = (struct AWSprofile*)nclistget(creds->profiles,i);
|
||||
fprintf(stderr," [%s]",p->name);
|
||||
for(j=0;j<nclistlength(p->entries);j++) {
|
||||
struct AWSentry* e = (struct AWSentry*)nclistget(p->entries,j);
|
||||
fprintf(stderr," %s=%s",e->key,e->value);
|
||||
}
|
||||
fprintf(stderr,"\n");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
done:
|
||||
ncbytesfree(buf);
|
||||
freeprofilelist(profiles);
|
||||
return stat;
|
||||
}
|
||||
|
||||
int
|
||||
NC_authgets3profile(const char* profilename, struct AWSprofile** profilep)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
int i = -1;
|
||||
NCRCglobalstate* gstate = ncrc_getglobalstate();
|
||||
|
||||
for(i=0;i<nclistlength(gstate->s3creds.profiles);i++) {
|
||||
struct AWSprofile* profile = (struct AWSprofile*)nclistget(gstate->s3creds.profiles,i);
|
||||
if(strcmp(profilename,profile->name)==0)
|
||||
{if(profilep) {*profilep = profile; goto done;}}
|
||||
}
|
||||
if(profilep) *profilep = NULL; /* not found */
|
||||
done:
|
||||
return stat;
|
||||
}
|
||||
|
||||
int
|
||||
NC_s3profilelookup(const char* profile, const char* key, const char** valuep)
|
||||
{
|
||||
int i,stat = NC_NOERR;
|
||||
struct AWSprofile* awsprof = NULL;
|
||||
const char* value = NULL;
|
||||
|
||||
if(profile == NULL) return NC_ES3;
|
||||
stat = NC_authgets3profile(profile,&awsprof);
|
||||
if(stat == NC_NOERR && awsprof != NULL) {
|
||||
for(i=0;i<nclistlength(awsprof->entries);i++) {
|
||||
struct AWSentry* entry = (struct AWSentry*)nclistget(awsprof->entries,i);
|
||||
if(strcasecmp(entry->key,key)==0) {
|
||||
value = entry->value;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if(valuep) *valuep = value;
|
||||
return stat;
|
||||
}
|
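A caller-side sketch of the two lookup entry points above (profile and key names as they would appear in ~/.aws/credentials):

    const char* id = NULL;
    const char* secret = NULL;
    (void)NC_s3profilelookup("default","aws_access_key_id",&id);
    (void)NC_s3profilelookup("default","aws_secret_access_key",&secret);
    /* id/secret stay NULL if the profile or key is absent; keys are matched case-insensitively */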
||||
|
@ -7,6 +7,7 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <assert.h>
|
||||
#ifdef HAVE_UNISTD_H
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
@ -24,6 +25,7 @@
|
||||
#include "ncbytes.h"
|
||||
#include "nclist.h"
|
||||
#include "nclog.h"
|
||||
#include "ncrc.h"
|
||||
#include "ncpathmgr.h"
|
||||
|
||||
#define NC_MAX_PATH 4096
|
||||
@ -31,6 +33,13 @@
|
||||
#define LBRACKET '['
|
||||
#define RBRACKET ']'
|
||||
|
||||
#define AWSHOST ".amazonaws.com"
|
||||
|
||||
enum URLFORMAT {UF_NONE=0, UF_VIRTUAL=1, UF_PATH=2, UF_S3=3, UF_OTHER=4};
|
||||
|
||||
/* Forward */
|
||||
static int endswith(const char* s, const char* suffix);
|
||||
|
||||
/**************************************************/
|
||||
/**
|
||||
* Provide a hidden interface to allow utilities
|
||||
@ -365,3 +374,173 @@ int isnan(double x)
|
||||
|
||||
#endif /*APPLE*/
|
||||
|
||||
|
||||
/**************************************************/
|
||||
/* Generic S3 Utilities */
|
||||
|
||||
/*
|
||||
Rebuild an S3 url into a canonical path-style url.
|
||||
If region is not in the host, then use specified region
|
||||
if provided, otherwise us-east-1.
|
||||
@param url (in) the current url
|
||||
@param region (in) region to use if needed; NULL => us-east-1
|
||||
(out) region from url or the input region
|
||||
@param newurlp (out) the resulting path-style url
|
||||
@param bucketp (out) the bucket from the url
|
||||
*/
|
||||
|
||||
int
|
||||
NC_s3urlrebuild(NCURI* url, NCURI** newurlp, char** bucketp, char** outregionp)
|
||||
{
|
||||
int i,stat = NC_NOERR;
|
||||
NClist* hostsegments = NULL;
|
||||
NClist* pathsegments = NULL;
|
||||
NCbytes* buf = ncbytesnew();
|
||||
NCURI* newurl = NULL;
|
||||
char* bucket = NULL;
|
||||
char* host = NULL;
|
||||
char* path = NULL;
|
||||
char* region = NULL;
|
||||
|
||||
if(url == NULL)
|
||||
{stat = NC_EURL; goto done;}
|
||||
|
||||
/* Parse the hostname */
|
||||
hostsegments = nclistnew();
|
||||
/* split the hostname by "." */
|
||||
if((stat = NC_split_delim(url->host,'.',hostsegments))) goto done;
|
||||
|
||||
/* Parse the path*/
|
||||
pathsegments = nclistnew();
|
||||
/* split the path by "/" */
|
||||
if((stat = NC_split_delim(url->path,'/',pathsegments))) goto done;
|
||||
|
||||
/* Distinguish path-style from virtual-host style from s3: and from other.
|
||||
Virtual: https://bucket-name.s3.Region.amazonaws.com/<path>
|
||||
Path: https://s3.Region.amazonaws.com/bucket-name/<path>
|
||||
S3: s3://bucket-name/<path>
|
||||
Other: https://<host>/bucketname/<path>
|
||||
*/
|
||||
if(url->host == NULL || strlen(url->host) == 0)
|
||||
{stat = NC_EURL; goto done;}
|
||||
if(strcmp(url->protocol,"s3")==0 && nclistlength(hostsegments)==1) {
|
||||
bucket = strdup(url->host);
|
||||
region = NULL; /* unknown at this point */
|
||||
} else if(endswith(url->host,AWSHOST)) { /* Virtual or path */
|
||||
switch (nclistlength(hostsegments)) {
|
||||
default: stat = NC_EURL; goto done;
|
||||
case 4:
|
||||
if(strcasecmp(nclistget(hostsegments,0),"s3")!=0)
|
||||
{stat = NC_EURL; goto done;}
|
||||
region = strdup(nclistget(hostsegments,1));
|
||||
if(nclistlength(pathsegments) > 0)
|
||||
bucket = nclistremove(pathsegments,0);
|
||||
break;
|
||||
case 5:
|
||||
if(strcasecmp(nclistget(hostsegments,1),"s3")!=0)
|
||||
{stat = NC_EURL; goto done;}
|
||||
region = strdup(nclistget(hostsegments,2));
|
||||
bucket = strdup(nclistget(hostsegments,0));
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
if((host = strdup(url->host))==NULL)
|
||||
{stat = NC_ENOMEM; goto done;}
|
||||
/* region is unknown */
|
||||
region = NULL;
|
||||
/* bucket is assumed to be start of the path */
|
||||
if(nclistlength(pathsegments) > 0)
|
||||
bucket = nclistremove(pathsegments,0);
|
||||
}
|
||||
/* If region is null, use default */
|
||||
if(region == NULL) {
|
||||
const char* region0 = NULL;
|
||||
/* Get default region */
|
||||
if((stat = NC_getdefaults3region(url,®ion0))) goto done;
|
||||
region = strdup(region0);
|
||||
}
|
||||
/* Construct the revised host */
|
||||
ncbytescat(buf,"s3.");
|
||||
ncbytescat(buf,region);
|
||||
ncbytescat(buf,AWSHOST);
|
||||
host = ncbytesextract(buf);
|
||||
|
||||
/* Construct the revised path */
|
||||
ncbytesclear(buf);
|
||||
ncbytescat(buf,"/");
|
||||
if(bucket == NULL)
|
||||
{stat = NC_EURL; goto done;}
|
||||
ncbytescat(buf,bucket);
|
||||
for(i=0;i<nclistlength(pathsegments);i++) {
|
||||
ncbytescat(buf,"/");
|
||||
ncbytescat(buf,nclistget(pathsegments,i));
|
||||
}
|
||||
path = ncbytesextract(buf);
|
||||
/* complete the new url */
|
||||
if((newurl=ncuriclone(url))==NULL) {stat = NC_ENOMEM; goto done;}
|
||||
ncurisethost(newurl,host);
|
||||
ncurisetpath(newurl,path);
|
||||
/* return various items */
|
||||
if(newurlp) {*newurlp = newurl; newurl = NULL;}
|
||||
if(bucketp) {*bucketp = bucket; bucket = NULL;}
|
||||
if(outregionp) {*outregionp = region; region = NULL;}
|
||||
|
||||
done:
|
||||
nullfree(region);
|
||||
nullfree(bucket);
|
||||
nullfree(host);
|
||||
nullfree(path);
|
||||
ncurifree(newurl);
|
||||
ncbytesfree(buf);
|
||||
nclistfreeall(hostsegments);
|
||||
nclistfreeall(pathsegments);
|
||||
return stat;
|
||||
}
|
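For example (assuming us-east-1 ends up as the region; bucket and key names invented), each of

    https://my-bucket.s3.us-east-1.amazonaws.com/prefix/file.nc   (virtual-host style)
    https://s3.us-east-1.amazonaws.com/my-bucket/prefix/file.nc   (path style)
    s3://my-bucket/prefix/file.nc                                 (s3 style)

is rewritten so that the host becomes s3.us-east-1.amazonaws.com and the path becomes /my-bucket/prefix/file.nc; "my-bucket" and "us-east-1" are returned through bucketp and outregionp, and the url scheme itself is left as the caller supplied it.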
||||
|
||||
static int
|
||||
endswith(const char* s, const char* suffix)
|
||||
{
|
||||
ssize_t ls, lsf, delta;
|
||||
if(s == NULL || suffix == NULL) return 0;
|
||||
ls = strlen(s);
|
||||
lsf = strlen(suffix);
|
||||
delta = (ls - lsf);
|
||||
if(delta < 0) return 0;
|
||||
if(memcmp(s+delta,suffix,lsf)!=0) return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
int
|
||||
NC_split_delim(const char* path, char delim, NClist* segments)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
const char* p = NULL;
|
||||
const char* q = NULL;
|
||||
ptrdiff_t len = 0;
|
||||
char* seg = NULL;
|
||||
|
||||
if(path == NULL || strlen(path)==0 || segments == NULL)
|
||||
goto done;
|
||||
p = path;
|
||||
if(p[0] == delim) p++;
|
||||
for(;*p;) {
|
||||
q = strchr(p,delim);
|
||||
if(q==NULL)
|
||||
q = p + strlen(p); /* point to trailing nul */
|
||||
len = (q - p);
|
||||
if(len == 0)
|
||||
{stat = NC_EURL; goto done;}
|
||||
if((seg = malloc(len+1)) == NULL)
|
||||
{stat = NC_ENOMEM; goto done;}
|
||||
memcpy(seg,p,len);
|
||||
seg[len] = '\0';
|
||||
nclistpush(segments,seg);
|
||||
seg = NULL; /* avoid mem errors */
|
||||
if(*q) p = q+1; else p = q;
|
||||
}
|
||||
|
||||
done:
|
||||
nullfree(seg);
|
||||
return stat;
|
||||
}
|
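A small usage sketch of the splitter above (a single leading delimiter is skipped; empty interior segments are rejected with NC_EURL):

    NClist* segments = nclistnew();
    if(NC_split_delim("/my-bucket/prefix/file.nc",'/',segments) == NC_NOERR) {
        /* segments now holds "my-bucket", "prefix", "file.nc" */
    }
    nclistfreeall(segments);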
||||
|
||||
|
@ -1,3 +1,8 @@
|
||||
/*
|
||||
* Copyright 1998-2018 University Corporation for Atmospheric Research/Unidata
|
||||
* See the LICENSE file for more information.
|
||||
*/
|
||||
|
||||
/* ezxml.c
|
||||
*
|
||||
* Copyright 2004-2006 Aaron Voisine <aaron@voisine.org>
|
||||
@ -44,22 +49,30 @@ typedef struct ezxml_root *ezxml_root_t;
|
||||
struct ezxml_root { /* additional data for the root tag*/
|
||||
struct ezxml xml; /* is a super-struct built on top of ezxml struct*/
|
||||
ezxml_t cur; /* current xml tree insertion point*/
|
||||
char *m; /* original xml string*/
|
||||
char* m; /* original xml string*/
|
||||
size_t len; /* length of allocated memory for mmap, -1 for malloc*/
|
||||
char *u; /* UTF-8 conversion of string if original was UTF-16*/
|
||||
char *s; /* start of work area*/
|
||||
char *e; /* end of work area*/
|
||||
char **ent; /* general entities (ampersand sequences)*/
|
||||
char ***attr; /* default attributes*/
|
||||
char ***pi; /* processing instructions*/
|
||||
char* u; /* UTF-8 conversion of string if original was UTF-16*/
|
||||
char* s; /* start of work area*/
|
||||
char* e; /* end of work area*/
|
||||
char* *ent; /* general entities (ampersand sequences)*/
|
||||
char* **attr; /* default attributes*/
|
||||
char* **pi; /* processing instructions*/
|
||||
short standalone; /* non-zero if <?xml standalone="yes"?>*/
|
||||
char err[EZXML_ERRL]; /* error string*/
|
||||
};
|
||||
|
||||
static const char *EZXML_NIL[] = { NULL }; /* empty, null terminated array of strings*/
|
||||
static const char* EZXML_NIL[] = { NULL }; /* empty, null terminated array of strings*/
|
||||
|
||||
/* Forward */
|
||||
static ezxml_t ezxml_add_child(ezxml_t xml, const char* name, size_t off);
|
||||
static ezxml_t ezxml_set_flag(ezxml_t xml, short flag);
|
||||
static ezxml_t ezxml_err(ezxml_root_t root, char* s, const char* err, ...);
|
||||
static ezxml_t ezxml_new(const char* name);
|
||||
static ezxml_t ezxml_vget(ezxml_t xml, va_list ap);
|
||||
|
||||
/* returns the first child tag with the given name or NULL if not found*/
|
||||
ezxml_t ezxml_child(ezxml_t xml, const char *name)
|
||||
ezxml_t
|
||||
nc_ezxml_child(ezxml_t xml, const char* name)
|
||||
{
|
||||
xml = (xml) ? xml->child : NULL;
|
||||
while (xml && strcmp(name, xml->name)) xml = xml->sibling;
|
||||
@ -68,14 +81,16 @@ ezxml_t ezxml_child(ezxml_t xml, const char *name)
|
||||
|
||||
/* returns the Nth tag with the same name in the same subsection or NULL if not*/
|
||||
/* found*/
|
||||
ezxml_t ezxml_idx(ezxml_t xml, int idx)
|
||||
ezxml_t
|
||||
nc_ezxml_idx(ezxml_t xml, int idx)
|
||||
{
|
||||
for (; xml && idx; idx--) xml = xml->next;
|
||||
return xml;
|
||||
}
|
||||
|
||||
/* returns the value of the requested tag attribute or NULL if not found*/
|
||||
const char *ezxml_attr(ezxml_t xml, const char *attr)
|
||||
const char*
|
||||
nc_ezxml_attr(ezxml_t xml, const char* attr)
|
||||
{
|
||||
int i = 0, j = 1;
|
||||
ezxml_root_t root = (ezxml_root_t)xml;
|
||||
@ -92,16 +107,17 @@ const char *ezxml_attr(ezxml_t xml, const char *attr)
|
||||
}
|
||||
|
||||
/* same as ezxml_get but takes an already initialized va_list*/
|
||||
ezxml_t ezxml_vget(ezxml_t xml, va_list ap)
|
||||
static ezxml_t
|
||||
ezxml_vget(ezxml_t xml, va_list ap)
|
||||
{
|
||||
char *name = va_arg(ap, char *);
|
||||
char* name = va_arg(ap, char* );
|
||||
int idx = -1;
|
||||
|
||||
if (name && *name) {
|
||||
idx = va_arg(ap, int);
|
||||
xml = ezxml_child(xml, name);
|
||||
xml = nc_ezxml_child(xml, name);
|
||||
}
|
||||
return (idx < 0) ? xml : ezxml_vget(ezxml_idx(xml, idx), ap);
|
||||
return (idx < 0) ? xml : ezxml_vget(nc_ezxml_idx(xml, idx), ap);
|
||||
}
|
||||
|
||||
/* Traverses the xml tree to retrieve a specific subtag. Takes a variable*/
|
||||
@ -110,7 +126,8 @@ ezxml_t ezxml_vget(ezxml_t xml, va_list ap)
|
||||
/* title = ezxml_get(library, "shelf", 0, "book", 2, "title", -1);*/
|
||||
/* This retrieves the title of the 3rd book on the 1st shelf of library.*/
|
||||
/* Returns NULL if not found.*/
|
||||
ezxml_t ezxml_get(ezxml_t xml, ...)
|
||||
ezxml_t
|
||||
nc_ezxml_get(ezxml_t xml, ...)
|
||||
{
|
||||
va_list ap;
|
||||
ezxml_t r;
|
||||
@ -123,23 +140,25 @@ ezxml_t ezxml_get(ezxml_t xml, ...)
|
||||
|
||||
/* returns a null terminated array of processing instructions for the given*/
|
||||
/* target*/
|
||||
const char **ezxml_pi(ezxml_t xml, const char *target)
|
||||
const char**
|
||||
nc_ezxml_pi(ezxml_t xml, const char* target)
|
||||
{
|
||||
ezxml_root_t root = (ezxml_root_t)xml;
|
||||
int i = 0;
|
||||
|
||||
if (! root) return (const char **)EZXML_NIL;
|
||||
if (! root) return (const char* *)EZXML_NIL;
|
||||
while (root->xml.parent) root = (ezxml_root_t)root->xml.parent; /* root tag*/
|
||||
while (root->pi[i] && strcmp(target, root->pi[i][0])) i++; /* find target*/
|
||||
return ((root->pi[i]) ? (const char**)(root->pi[i] + 1) : EZXML_NIL);
|
||||
}
|
||||
|
||||
/* set an error string and return root*/
|
||||
ezxml_t ezxml_err(ezxml_root_t root, char *s, const char *err, ...)
|
||||
static ezxml_t
|
||||
ezxml_err(ezxml_root_t root, char* s, const char* err, ...)
|
||||
{
|
||||
va_list ap;
|
||||
int line = 1;
|
||||
char *t, fmt[EZXML_ERRL];
|
||||
char* t, fmt[EZXML_ERRL];
|
||||
|
||||
for (t = root->s; t < s; t++) if (*t == '\n') line++;
|
||||
snprintf(fmt, EZXML_ERRL, "[error near line %d]: %s", line, err);
|
||||
@ -157,9 +176,10 @@ ezxml_t ezxml_err(ezxml_root_t root, char *s, const char *err, ...)
|
||||
/* for cdata sections, ' ' for attribute normalization, or '*' for non-cdata*/
|
||||
/* attribute normalization. Returns s, or if the decoded string is longer than*/
|
||||
/* s, returns a malloced string that must be freed.*/
|
||||
char *ezxml_decode(char *s, char **ent, char t)
|
||||
static char*
|
||||
ezxml_decode(char* s, char* *ent, char t)
|
||||
{
|
||||
char *e, *r = s, *m = s;
|
||||
char* e, *r = s, *m = s;
|
||||
long b, c, d, l;
|
||||
|
||||
for (; *s; s++) { /* normalize line endings*/
|
||||
@ -220,7 +240,8 @@ char *ezxml_decode(char *s, char **ent, char t)
|
||||
}
|
||||
|
||||
/* called when parser finds start of new tag*/
|
||||
void ezxml_open_tag(ezxml_root_t root, char *name, char **attr)
|
||||
static void
|
||||
ezxml_open_tag(ezxml_root_t root, char* name, char* *attr)
|
||||
{
|
||||
ezxml_t xml = root->cur;
|
||||
|
||||
@ -232,10 +253,11 @@ void ezxml_open_tag(ezxml_root_t root, char *name, char **attr)
|
||||
}
|
||||
|
||||
/* called when parser finds character content between open and closing tag*/
|
||||
void ezxml_char_content(ezxml_root_t root, char *s, size_t len, char t)
|
||||
static void
|
||||
ezxml_char_content(ezxml_root_t root, char* s, size_t len, char t)
|
||||
{
|
||||
ezxml_t xml = root->cur;
|
||||
char *m = s;
|
||||
char* m = s;
|
||||
size_t l;
|
||||
|
||||
if (! xml || ! xml->name || ! len) return; /* sanity check*/
|
||||
@ -256,7 +278,8 @@ void ezxml_char_content(ezxml_root_t root, char *s, size_t len, char t)
|
||||
}
|
||||
|
||||
/* called when parser finds closing tag*/
|
||||
ezxml_t ezxml_close_tag(ezxml_root_t root, char *name, char *s)
|
||||
static ezxml_t
|
||||
ezxml_close_tag(ezxml_root_t root, char* name, char* s)
|
||||
{
|
||||
if (! root->cur || ! root->cur->name || strcmp(name, root->cur->name))
|
||||
return ezxml_err(root, s, "unexpected closing tag </%s>", name);
|
||||
@ -267,7 +290,8 @@ ezxml_t ezxml_close_tag(ezxml_root_t root, char *name, char *s)
|
||||
|
||||
/* checks for circular entity references, returns non-zero if no circular*/
|
||||
/* references are found, zero otherwise*/
|
||||
int ezxml_ent_ok(char *name, char *s, char **ent)
|
||||
static int
|
||||
ezxml_ent_ok(char* name, char* s, char* *ent)
|
||||
{
|
||||
int i;
|
||||
|
||||
@ -281,10 +305,11 @@ int ezxml_ent_ok(char *name, char *s, char **ent)
|
||||
}
|
||||
|
||||
/* called when the parser finds a processing instruction*/
|
||||
void ezxml_proc_inst(ezxml_root_t root, char *s, size_t len)
|
||||
static void
|
||||
ezxml_proc_inst(ezxml_root_t root, char* s, size_t len)
|
||||
{
|
||||
int i = 0, j = 1;
|
||||
char *target = s;
|
||||
char* target = s;
|
||||
|
||||
s[len] = '\0'; /* null terminate instruction*/
|
||||
if (*(s += strcspn(s, EZXML_WS))) {
|
||||
@ -298,19 +323,19 @@ void ezxml_proc_inst(ezxml_root_t root, char *s, size_t len)
|
||||
return;
|
||||
}
|
||||
|
||||
if (! root->pi[0]) *(root->pi = malloc(sizeof(char **))) = NULL; /*first pi*/
|
||||
if (! root->pi[0]) *(root->pi = malloc(sizeof(char* *))) = NULL; /*first pi*/
|
||||
|
||||
while (root->pi[i] && strcmp(target, root->pi[i][0])) i++; /* find target*/
|
||||
if (! root->pi[i]) { /* new target*/
|
||||
root->pi = realloc(root->pi, sizeof(char **) * (i + 2));
|
||||
root->pi[i] = malloc(sizeof(char *) * 3);
|
||||
root->pi = realloc(root->pi, sizeof(char* *) * (i + 2));
|
||||
root->pi[i] = malloc(sizeof(char* ) * 3);
|
||||
root->pi[i][0] = target;
|
||||
root->pi[i][1] = (char *)(root->pi[i + 1] = NULL); /* terminate pi list*/
|
||||
root->pi[i][1] = (char* )(root->pi[i + 1] = NULL); /* terminate pi list*/
|
||||
root->pi[i][2] = strdup(""); /* empty document position list*/
|
||||
}
|
||||
|
||||
while (root->pi[i][j]) j++; /* find end of instruction list for this target*/
|
||||
root->pi[i] = realloc(root->pi[i], sizeof(char *) * (j + 3));
|
||||
root->pi[i] = realloc(root->pi[i], sizeof(char* ) * (j + 3));
|
||||
root->pi[i][j + 2] = realloc(root->pi[i][j + 1], j + 1);
|
||||
strcpy(root->pi[i][j + 2] + j - 1, (root->xml.name) ? ">" : "<");
|
||||
root->pi[i][j + 1] = NULL; /* null terminate pi list for this target*/
|
||||
@ -318,7 +343,8 @@ void ezxml_proc_inst(ezxml_root_t root, char *s, size_t len)
|
||||
}
|
||||
|
||||
/* called when the parser finds an internal doctype subset*/
|
||||
short ezxml_internal_dtd(ezxml_root_t root, char *s, size_t len)
|
||||
static short
|
||||
ezxml_internal_dtd(ezxml_root_t root, char* s, size_t len)
|
||||
{
|
||||
char q, *c, *t, *n = NULL, *v, **ent, **pe;
|
||||
int i, j;
|
||||
@ -341,7 +367,7 @@ short ezxml_internal_dtd(ezxml_root_t root, char *s, size_t len)
|
||||
}
|
||||
|
||||
for (i = 0, ent = (*c == '%') ? pe : root->ent; ent[i]; i++);
|
||||
ent = realloc(ent, (i + 3) * sizeof(char *)); /* space for next ent*/
|
||||
ent = realloc(ent, (i + 3) * sizeof(char* )); /* space for next ent*/
|
||||
if (*c == '%') pe = ent;
|
||||
else root->ent = ent;
|
||||
|
||||
@ -389,17 +415,17 @@ short ezxml_internal_dtd(ezxml_root_t root, char *s, size_t len)
|
||||
else { ezxml_err(root, t, "malformed <!ATTLIST"); break; }
|
||||
|
||||
if (! root->attr[i]) { /* new tag name*/
|
||||
root->attr = (! i) ? malloc(2 * sizeof(char **))
|
||||
root->attr = (! i) ? malloc(2 * sizeof(char* *))
|
||||
: realloc(root->attr,
|
||||
(i + 2) * sizeof(char **));
|
||||
root->attr[i] = malloc(2 * sizeof(char *));
|
||||
(i + 2) * sizeof(char* *));
|
||||
root->attr[i] = malloc(2 * sizeof(char* ));
|
||||
root->attr[i][0] = t; /* set tag name*/
|
||||
root->attr[i][1] = (char *)(root->attr[i + 1] = NULL);
|
||||
root->attr[i][1] = (char* )(root->attr[i + 1] = NULL);
|
||||
}
|
||||
|
||||
for (j = 1; root->attr[i][j]; j += 3); /* find end of list*/
|
||||
root->attr[i] = realloc(root->attr[i],
|
||||
(j + 4) * sizeof(char *));
|
||||
(j + 4) * sizeof(char* ));
|
||||
|
||||
root->attr[i][j + 3] = NULL; /* null terminate list*/
|
||||
root->attr[i][j + 2] = c; /* is it cdata?*/
|
||||
@ -423,9 +449,10 @@ short ezxml_internal_dtd(ezxml_root_t root, char *s, size_t len)
|
||||
|
||||
/* Converts a UTF-16 string to UTF-8. Returns a new string that must be freed*/
|
||||
/* or NULL if no conversion was needed.*/
|
||||
char *ezxml_str2utf8(char **s, size_t *len)
|
||||
static char*
|
||||
ezxml_str2utf8(char* *s, size_t *len)
|
||||
{
|
||||
char *u;
|
||||
char* u;
|
||||
size_t l = 0, sl, max = *len;
|
||||
long c, d;
|
||||
int b, be = (**s == '\xFE') ? 1 : (**s == '\xFF') ? 0 : -1;
|
||||
@ -455,9 +482,11 @@ char *ezxml_str2utf8(char **s, size_t *len)
|
||||
}
|
||||
|
||||
/* frees a tag attribute list*/
|
||||
void ezxml_free_attr(char **attr) {
|
||||
static void
|
||||
ezxml_free_attr(char* *attr)
|
||||
{
|
||||
int i = 0;
|
||||
char *m;
|
||||
char* m;
|
||||
|
||||
if (! attr || ((const char**)attr) == EZXML_NIL) return; /* nothing to free*/
|
||||
while (attr[i]) i += 2; /* find end of attribute list*/
|
||||
@ -471,7 +500,8 @@ void ezxml_free_attr(char **attr) {
|
||||
}
|
||||
|
||||
/* parse the given xml string and return an ezxml structure*/
|
||||
ezxml_t ezxml_parse_str(char *s, size_t len)
|
||||
ezxml_t
|
||||
nc_ezxml_parse_str(char* s, size_t len)
|
||||
{
|
||||
ezxml_root_t root = (ezxml_root_t)ezxml_new(NULL);
|
||||
char q, e, *d, **attr, **a = NULL; /* initialize a to avoid compile warning*/
|
||||
@ -489,7 +519,7 @@ ezxml_t ezxml_parse_str(char *s, size_t len)
|
||||
if (! *s) return ezxml_err(root, s, "root tag missing");
|
||||
|
||||
for (; ; ) {
|
||||
attr = (char **)EZXML_NIL;
|
||||
attr = (char* *)EZXML_NIL;
|
||||
d = ++s;
|
||||
|
||||
if (isalpha(*s) || *s == '_' || *s == ':' || *s < '\0') { /* new tag*/
|
||||
@ -503,8 +533,8 @@ ezxml_t ezxml_parse_str(char *s, size_t len)
|
||||
for (i = 0; (a = root->attr[i]) && strcmp(a[0], d); i++);
|
||||
|
||||
for (l = 0; *s && *s != '/' && *s != '>'; l += 2) { /* new attrib*/
|
||||
attr = (l) ? realloc(attr, (l + 4) * sizeof(char *))
|
||||
: malloc(4 * sizeof(char *)); /* allocate space*/
|
||||
attr = (l) ? realloc(attr, (l + 4) * sizeof(char* ))
|
||||
: malloc(4 * sizeof(char* )); /* allocate space*/
|
||||
attr[l + 3] = (l) ? realloc(attr[l + 1], (l / 2) + 2)
|
||||
: malloc(2); /* mem for list of maloced vals*/
|
||||
strcpy(attr[l + 3] + (l / 2), " "); /* value is not malloced*/
|
||||
@ -603,33 +633,12 @@ ezxml_t ezxml_parse_str(char *s, size_t len)
|
||||
else return ezxml_err(root, d, "unclosed tag <%s>", root->cur->name);
|
||||
}
|
||||
|
||||
/* Wrapper for ezxml_parse_str() that accepts a file stream. Reads the entire*/
|
||||
/* stream into memory and then parses it. For xml files, use ezxml_parse_file()*/
|
||||
/* or ezxml_parse_fd()*/
|
||||
ezxml_t ezxml_parse_fp(FILE *fp)
|
||||
{
|
||||
ezxml_root_t root;
|
||||
size_t l, len = 0;
|
||||
char *s;
|
||||
|
||||
if (! (s = malloc(EZXML_BUFSIZE))) return NULL;
|
||||
do {
|
||||
len += (l = fread((s + len), 1, EZXML_BUFSIZE, fp));
|
||||
if (l == EZXML_BUFSIZE) s = realloc(s, len + EZXML_BUFSIZE);
|
||||
} while (s && l == EZXML_BUFSIZE);
|
||||
|
||||
if (! s) return NULL;
|
||||
root = (ezxml_root_t)ezxml_parse_str(s, len);
|
||||
root->len = -1; /* so we know to free s in ezxml_free()*/
|
||||
return &root->xml;
|
||||
}
|
||||
|
||||
/* Encodes ampersand sequences appending the results to *dst, reallocating *dst*/
|
||||
/* if length excedes max. a is non-zero for attribute encoding. Returns *dst*/
|
||||
char *ezxml_ampencode(const char *s, size_t len, char **dst, size_t *dlen,
|
||||
size_t *max, short a)
|
||||
static char*
|
||||
ezxml_ampencode(const char* s, size_t len, char* *dst, size_t *dlen, size_t *max, short a)
|
||||
{
|
||||
const char *e;
|
||||
const char* e;
|
||||
|
||||
for (e = s + len; s != e; s++) {
|
||||
while (*dlen + 10 > *max) *dst = realloc(*dst, *max += EZXML_BUFSIZE);
|
||||
@ -652,11 +661,11 @@ char *ezxml_ampencode(const char *s, size_t len, char **dst, size_t *dlen,
|
||||
/* Recursively converts each tag to xml appending it to *s. Reallocates *s if*/
|
||||
/* its length excedes max. start is the location of the previous tag in the*/
|
||||
/* parent tag's character content. Returns *s.*/
|
||||
char *ezxml_toxml_r(ezxml_t xml, char **s, size_t *len, size_t *max,
|
||||
size_t start, char ***attr)
|
||||
static char*
|
||||
ezxml_toxml_r(ezxml_t xml, char* *s, size_t *len, size_t *max, size_t start, char* **attr)
|
||||
{
|
||||
int i, j;
|
||||
char *txt = (xml->parent) ? xml->parent->txt : "";
|
||||
char* txt = (xml->parent) ? xml->parent->txt : "";
|
||||
size_t off = 0;
|
||||
|
||||
/* parent character content up to this tag*/
|
||||
@ -667,7 +676,7 @@ char *ezxml_toxml_r(ezxml_t xml, char **s, size_t *len, size_t *max,
|
||||
|
||||
*len += sprintf(*s + *len, "<%s", xml->name); /* open tag*/
|
||||
for (i = 0; xml->attr[i]; i += 2) { /* tag attributes*/
|
||||
if (ezxml_attr(xml, xml->attr[i]) != xml->attr[i + 1]) continue;
|
||||
if (nc_ezxml_attr(xml, xml->attr[i]) != xml->attr[i + 1]) continue;
|
||||
while (*len + strlen(xml->attr[i]) + 7 > *max) /* reallocate s*/
|
||||
*s = realloc(*s, *max += EZXML_BUFSIZE);
|
||||
|
||||
@ -678,7 +687,7 @@ char *ezxml_toxml_r(ezxml_t xml, char **s, size_t *len, size_t *max,
|
||||
|
||||
for (i = 0; attr[i] && strcmp(attr[i][0], xml->name); i++);
|
||||
for (j = 1; attr[i] && attr[i][j]; j += 3) { /* default attributes*/
|
||||
if (! attr[i][j + 1] || ezxml_attr(xml, attr[i][j]) != attr[i][j + 1])
|
||||
if (! attr[i][j + 1] || nc_ezxml_attr(xml, attr[i][j]) != attr[i][j + 1])
|
||||
continue; /* skip duplicates and non-values*/
|
||||
while (*len + strlen(attr[i][j]) + 7 > *max) /* reallocate s*/
|
||||
*s = realloc(*s, *max += EZXML_BUFSIZE);
|
||||
@ -704,12 +713,13 @@ char *ezxml_toxml_r(ezxml_t xml, char **s, size_t *len, size_t *max,
|
||||
|
||||
/* Converts an ezxml structure back to xml. Returns a string of xml data that*/
|
||||
/* must be freed.*/
|
||||
char *ezxml_toxml(ezxml_t xml)
|
||||
char*
|
||||
nc_ezxml_toxml(ezxml_t xml)
|
||||
{
|
||||
ezxml_t p = (xml) ? xml->parent : NULL, o = (xml) ? xml->ordered : NULL;
|
||||
ezxml_root_t root = (ezxml_root_t)xml;
|
||||
size_t len = 0, max = EZXML_BUFSIZE;
|
||||
char *s = strcpy(malloc(max), ""), *t, *n;
|
||||
char* s = strcpy(malloc(max), ""), *t, *n;
|
||||
int i, j, k;
|
||||
|
||||
if (! xml || ! xml->name) return realloc(s, len + 1);
|
||||
@ -743,15 +753,16 @@ char *ezxml_toxml(ezxml_t xml)
|
||||
}
|
||||
|
||||
/* free the memory allocated for the ezxml structure*/
|
||||
void ezxml_free(ezxml_t xml)
|
||||
void
|
||||
nc_ezxml_free(ezxml_t xml)
|
||||
{
|
||||
ezxml_root_t root = (ezxml_root_t)xml;
|
||||
int i, j;
|
||||
char **a, *s;
|
||||
char* *a, *s;
|
||||
|
||||
if (! xml) return;
|
||||
ezxml_free(xml->child);
|
||||
ezxml_free(xml->ordered);
|
||||
nc_ezxml_free(xml->child);
|
||||
nc_ezxml_free(xml->ordered);
|
||||
|
||||
if (! xml->parent) { /* free root tag allocations*/
|
||||
for (i = 10; root->ent[i]; i += 2) /* 0 - 9 are default entities (<>&"')*/
|
||||
@ -786,29 +797,32 @@ void ezxml_free(ezxml_t xml)
|
||||
}
|
||||
|
||||
/* return parser error message or empty string if none*/
|
||||
const char *ezxml_error(ezxml_t xml)
|
||||
const char*
|
||||
nc_ezxml_error(ezxml_t xml)
|
||||
{
|
||||
while (xml && xml->parent) xml = xml->parent; /* find root tag*/
|
||||
return (xml) ? ((ezxml_root_t)xml)->err : "";
|
||||
}
|
||||
|
||||
/* returns a new empty ezxml structure with the given root tag name*/
|
||||
ezxml_t ezxml_new(const char *name)
|
||||
static ezxml_t
|
||||
ezxml_new(const char* name)
|
||||
{
|
||||
static const char *entities[] = { "lt;", "<", "gt;", ">", "quot;", """,
|
||||
static const char* entities[] = { "lt;", "<", "gt;", ">", "quot;", """,
|
||||
"apos;", "'", "amp;", "&", NULL };
|
||||
ezxml_root_t root = (ezxml_root_t)memset(malloc(sizeof(struct ezxml_root)),
|
||||
'\0', sizeof(struct ezxml_root));
|
||||
root->xml.name = (char *)name;
|
||||
root->xml.name = (char* )name;
|
||||
root->cur = &root->xml;
|
||||
strcpy(root->err, root->xml.txt = "");
|
||||
root->ent = memcpy(malloc(sizeof(entities)), entities, sizeof(entities));
|
||||
root->attr = root->pi = (char ***)(root->xml.attr = (char**)EZXML_NIL);
|
||||
root->attr = root->pi = (char* **)(root->xml.attr = (char**)EZXML_NIL);
|
||||
return &root->xml;
|
||||
}
|
||||
|
||||
/* inserts an existing tag into an ezxml structure*/
|
||||
ezxml_t ezxml_insert(ezxml_t xml, ezxml_t dest, size_t off)
|
||||
static ezxml_t
|
||||
ezxml_insert(ezxml_t xml, ezxml_t dest, size_t off)
|
||||
{
|
||||
ezxml_t cur, prev, head;
|
||||
|
||||
@ -851,33 +865,105 @@ ezxml_t ezxml_insert(ezxml_t xml, ezxml_t dest, size_t off)
|
||||
|
||||
/* Adds a child tag. off is the offset of the child tag relative to the start*/
|
||||
/* of the parent tag's character content. Returns the child tag.*/
|
||||
ezxml_t ezxml_add_child(ezxml_t xml, const char *name, size_t off)
|
||||
static ezxml_t
|
||||
ezxml_add_child(ezxml_t xml, const char* name, size_t off)
|
||||
{
|
||||
ezxml_t child;
|
||||
|
||||
if (! xml) return NULL;
|
||||
child = (ezxml_t)memset(malloc(sizeof(struct ezxml)), '\0',
|
||||
sizeof(struct ezxml));
|
||||
child->name = (char *)name;
|
||||
child->name = (char* )name;
|
||||
child->attr = (char**)EZXML_NIL;
|
||||
child->txt = "";
|
||||
|
||||
return ezxml_insert(child, xml, off);
|
||||
}
|
||||
|
||||
/* sets a flag for the given tag and returns the tag*/
|
||||
static ezxml_t
|
||||
ezxml_set_flag(ezxml_t xml, short flag)
|
||||
{
|
||||
if (xml) xml->flags |= flag;
|
||||
return xml;
|
||||
}
|
||||
|
||||
/**
|
||||
Extra ezxml functionality
|
||||
*/
|
||||
|
||||
/**
|
||||
Get list of all the xml attributes.
|
||||
Returns NULL, if none
|
||||
WARNING: returns actual list, so do not free
|
||||
*/
|
||||
const char**
|
||||
nc_ezxml_all_attr(ezxml_t xml, int* countp)
|
||||
{
|
||||
if(xml && xml->attr) {
|
||||
char** p;
|
||||
int count = 0;
|
||||
for(p=xml->attr;*p;p+=2) count += 2; /* get number of attributes */
|
||||
return (const char**)xml->attr;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef EZXML_TEST /* test harness*/
|
||||
int main(int argc, char* *argv)
|
||||
{
|
||||
ezxml_t xml;
|
||||
char* s;
|
||||
int i;
|
||||
|
||||
if (argc != 2) return fprintf(stderr, "usage: %s xmlfile\n", argv[0]);
|
||||
|
||||
xml = ezxml_parse_file(argv[1]);
|
||||
printf("%s\n", (s = ezxml_toxml(xml)));
|
||||
free(s);
|
||||
i = fprintf(stderr, "%s", ezxml_error(xml));
|
||||
ezxml_free(xml);
|
||||
return (i) ? 1 : 0;
|
||||
}
|
||||
#endif /* EZXML_TEST*/
|
||||
|
||||
#if -0
|
||||
/* Wrapper for ezxml_parse_str() that accepts a file stream. Reads the entire*/
|
||||
/* stream into memory and then parses it. For xml files, use ezxml_parse_file()*/
|
||||
/* or ezxml_parse_fd()*/
|
||||
ezxml_t ezxml_parse_fp(FILE *fp)
|
||||
{
|
||||
ezxml_root_t root;
|
||||
size_t l, len = 0;
|
||||
char *s;
|
||||
|
||||
if (! (s = malloc(EZXML_BUFSIZE))) return NULL;
|
||||
do {
|
||||
len += (l = fread((s + len), 1, EZXML_BUFSIZE, fp));
|
||||
if (l == EZXML_BUFSIZE) s = realloc(s, len + EZXML_BUFSIZE);
|
||||
} while (s && l == EZXML_BUFSIZE);
|
||||
|
||||
if (! s) return NULL;
|
||||
root = (ezxml_root_t)ezxml_parse_str(s, len);
|
||||
root->len = -1; /* so we know to free s in ezxml_free()*/
|
||||
return &root->xml;
|
||||
}
|
||||
|
||||
/* sets the character content for the given tag and returns the tag*/
|
||||
ezxml_t ezxml_set_txt(ezxml_t xml, const char *txt)
|
||||
static ezxml_t
|
||||
ezxml_set_txt(ezxml_t xml, const char* txt)
|
||||
{
|
||||
if (! xml) return NULL;
|
||||
if (xml->flags & EZXML_TXTM) free(xml->txt); /* existing txt was malloced*/
|
||||
xml->flags &= ~EZXML_TXTM;
|
||||
xml->txt = (char *)txt;
|
||||
xml->txt = (char* )txt;
|
||||
return xml;
|
||||
}
|
||||
|
||||
/* Sets the given tag attribute or adds a new attribute if not found. A value*/
|
||||
/* of NULL will remove the specified attribute. Returns the tag given.*/
|
||||
ezxml_t ezxml_set_attr(ezxml_t xml, const char *name, const char *value)
|
||||
static ezxml_t
|
||||
ezxml_set_attr(ezxml_t xml, const char* name, const char* value)
|
||||
{
|
||||
int l = 0, c;
|
||||
|
||||
@ -886,30 +972,30 @@ ezxml_t ezxml_set_attr(ezxml_t xml, const char *name, const char *value)
|
||||
if (! xml->attr[l]) { /* not found, add as new attribute*/
|
||||
if (! value) return xml; /* nothing to do*/
|
||||
if (xml->attr == (char**)EZXML_NIL) { /* first attribute*/
|
||||
xml->attr = malloc(4 * sizeof(char *));
|
||||
xml->attr = malloc(4 * sizeof(char* ));
|
||||
xml->attr[1] = strdup(""); /* empty list of malloced names/vals*/
|
||||
}
|
||||
else xml->attr = realloc(xml->attr, (l + 4) * sizeof(char *));
|
||||
else xml->attr = realloc(xml->attr, (l + 4) * sizeof(char* ));
|
||||
|
||||
xml->attr[l] = (char *)name; /* set attribute name*/
|
||||
xml->attr[l] = (char* )name; /* set attribute name*/
|
||||
xml->attr[l + 2] = NULL; /* null terminate attribute list*/
|
||||
xml->attr[l + 3] = realloc(xml->attr[l + 1],
|
||||
(c = strlen(xml->attr[l + 1])) + 2);
|
||||
strcpy(xml->attr[l + 3] + c, " "); /* set name/value as not malloced*/
|
||||
if (xml->flags & EZXML_DUP) xml->attr[l + 3][c] = EZXML_NAMEM;
|
||||
}
|
||||
else if (xml->flags & EZXML_DUP) free((char *)name); /* name was strduped*/
|
||||
else if (xml->flags & EZXML_DUP) free((char* )name); /* name was strduped*/
|
||||
|
||||
for (c = l; xml->attr[c]; c += 2); /* find end of attribute list*/
|
||||
if (xml->attr[c + 1][l / 2] & EZXML_TXTM) free(xml->attr[l + 1]); /*old val*/
|
||||
if (xml->flags & EZXML_DUP) xml->attr[c + 1][l / 2] |= EZXML_TXTM;
|
||||
else xml->attr[c + 1][l / 2] &= ~EZXML_TXTM;
|
||||
|
||||
if (value) xml->attr[l + 1] = (char *)value; /* set attribute value*/
|
||||
if (value) xml->attr[l + 1] = (char* )value; /* set attribute value*/
|
||||
else { /* remove attribute*/
|
||||
if (xml->attr[c + 1][l / 2] & EZXML_NAMEM) free(xml->attr[l]);
|
||||
memmove(xml->attr + l, xml->attr + l + 2, (c - l + 2) * sizeof(char*));
|
||||
xml->attr = realloc(xml->attr, (c + 2) * sizeof(char *));
|
||||
xml->attr = realloc(xml->attr, (c + 2) * sizeof(char* ));
|
||||
memmove(xml->attr[c + 1] + (l / 2), xml->attr[c + 1] + (l / 2) + 1,
|
||||
(c / 2) - (l / 2)); /* fix list of which name/vals are malloced*/
|
||||
}
|
||||
@ -917,15 +1003,9 @@ ezxml_t ezxml_set_attr(ezxml_t xml, const char *name, const char *value)
|
||||
return xml;
|
||||
}
|
||||
|
||||
/* sets a flag for the given tag and returns the tag*/
|
||||
ezxml_t ezxml_set_flag(ezxml_t xml, short flag)
|
||||
{
|
||||
if (xml) xml->flags |= flag;
|
||||
return xml;
|
||||
}
|
||||
|
||||
/* removes a tag along with its subtags without freeing its memory*/
|
||||
ezxml_t ezxml_cut(ezxml_t xml)
|
||||
static ezxml_t
|
||||
ezxml_cut(ezxml_t xml)
|
||||
{
|
||||
ezxml_t cur;
|
||||
|
||||
@ -957,21 +1037,4 @@ ezxml_t ezxml_cut(ezxml_t xml)
|
||||
xml->ordered = xml->sibling = xml->next = NULL;
|
||||
return xml;
|
||||
}
|
||||
|
||||
#ifdef EZXML_TEST /* test harness*/
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
ezxml_t xml;
|
||||
char *s;
|
||||
int i;
|
||||
|
||||
if (argc != 2) return fprintf(stderr, "usage: %s xmlfile\n", argv[0]);
|
||||
|
||||
xml = ezxml_parse_file(argv[1]);
|
||||
printf("%s\n", (s = ezxml_toxml(xml)));
|
||||
free(s);
|
||||
i = fprintf(stderr, "%s", ezxml_error(xml));
|
||||
ezxml_free(xml);
|
||||
return (i) ? 1 : 0;
|
||||
}
|
||||
#endif /* EZXML_TEST*/
|
||||
#endif
@ -177,13 +177,13 @@ ncbytesextract(NCbytes* bb)
}

int
ncbytessetcontents(NCbytes* bb, char* contents, unsigned long alloc)
ncbytessetcontents(NCbytes* bb, void* contents, size_t alloc)
{
if(bb == NULL) return ncbytesfail();
ncbytesclear(bb);
if(!bb->nonextendible && bb->content != NULL) free(bb->content);
bb->content = contents;
bb->length = 0;
bb->content = (char*)contents;
bb->length = alloc;
bb->alloc = alloc;
bb->nonextendible = 1;
return 1;
@ -109,7 +109,7 @@ nclistsetlength(NClist* l, size_t newlen)
}

void*
nclistget(NClist* l, size_t index)
nclistget(const NClist* l, size_t index)
{
if(l == NULL || l->length == 0) return NULL;
if(index >= l->length) return NULL;
@ -260,7 +260,7 @@ nclistunique(NClist* l)
/* Duplicate a list and if deep is true, assume the contents
are char** and duplicate those also */
NClist*
nclistclone(NClist* l, int deep)
nclistclone(const NClist* l, int deep)
{
NClist* clone = NULL;
if(l == NULL) goto done;
@ -485,6 +485,15 @@ ncurisetprotocol(NCURI* duri,const char* protocol)
return (NC_NOERR);
}

/* Replace the host */
int
ncurisethost(NCURI* duri,const char* host)
{
nullfree(duri->host);
duri->host = strdup(host);
return (NC_NOERR);
}

/* Replace the path */
int
ncurisetpath(NCURI* duri,const char* newpath)
@ -989,6 +998,38 @@ ncuridecodepartial(const char* s, const char* decodeset)
return decoded;
}

/* Deep clone a uri */
NCURI*
ncuriclone(NCURI* uri)
{
int stat = NC_NOERR;
NCURI* newuri = NULL;

/* make sure fragments and query are up to date */
if((stat=ensurefraglist(uri))) goto done;
if((stat=ensurequerylist(uri))) goto done;

if((newuri = (NCURI*)calloc(1,sizeof(NCURI)))==NULL)
{stat = NC_ENOMEM; goto done;}
*newuri = *uri; /* copy */
/* deep clone fields */

newuri->uri = nulldup(uri->uri);
newuri->protocol = nulldup(uri->protocol);
newuri->user = nulldup(uri->user);
newuri->password = nulldup(uri->password);
newuri->host = nulldup(uri->host);
newuri->port = nulldup(uri->port);
newuri->path = nulldup(uri->path);
newuri->query = nulldup(uri->query);
newuri->fragment = nulldup(uri->fragment);
/* make these be rebuilt */
newuri->fraglist = NULL;
newuri->querylist = NULL;
done:
return newuri;
}

static int
collectprefixparams(char* text, char** nextp)
{
@ -332,8 +332,11 @@ H5FD_http_open( const char *name, unsigned flags, hid_t /*UNUSED*/ fapl_id,
/* Always read-only */
write_access = 0;

/* Open file in read-only mode, to check for existence and get length */
if((ncstat = nc_http_open(name,&state,&len))) {
/* Open file in read-only mode, to check for existence and get length */
if((ncstat = nc_http_init(&state))) {
H5Epush_ret(func, H5E_ERR_CLS, H5E_IO, H5E_CANTOPENFILE, "cannot access object", NULL);
}
if((ncstat = nc_http_size(state,name,&len))) {
H5Epush_ret(func, H5E_ERR_CLS, H5E_IO, H5E_CANTOPENFILE, "cannot access object", NULL);
}

@ -69,12 +69,6 @@ extern int NC4_open_image_file(NC_FILE_INFO_T* h5);
/* Defined later in this file. */
static int rec_read_metadata(NC_GRP_INFO_T *grp);

#ifdef ENABLE_BYTERANGE
#ifdef ENABLE_HDF5_ROS3
static int ros3info(NCauth** auth, NCURI* uri, char** hostportp, char** regionp);
#endif
#endif

/**
* @internal Struct to track HDF5 object info, for
* rec_read_metadata(). We get this info for every object in the
@ -858,28 +852,39 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
NCURI* uri = NULL;
H5FD_ros3_fapl_t fa;
char* hostport = NULL;
char* region = NULL;
const char* profile0 = NULL;
const char* awsaccessid0 = NULL;
const char* awssecretkey0 = NULL;
const char* awsregion0 = NULL;

ncuriparse(path,&uri);
if(uri == NULL)
BAIL(NC_EINVAL);
if((ros3info(&h5->http.auth,uri,&hostport,&region)))
BAIL(NC_EINVAL);
ncurifree(uri); uri = NULL;
hostport = NC_combinehostport(uri);
if((retval = NC_getactives3profile(uri,&profile0)))
BAIL(retval);
fa.version = 1;
fa.aws_region[0] = '\0';
fa.secret_id[0] = '\0';
fa.secret_key[0] = '\0';
if(h5->http.auth->s3creds.accessid == NULL || h5->http.auth->s3creds.secretkey == NULL) {
if((retval = NC_s3profilelookup(profile0,AWS_ACCESS_KEY_ID,&awsaccessid0)))
BAIL(retval);
if((retval = NC_s3profilelookup(profile0,AWS_SECRET_ACCESS_KEY,&awssecretkey0)))
BAIL(retval);
if((retval = NC_s3profilelookup(profile0,AWS_REGION,&awsregion0)))
BAIL(retval);
if(awsaccessid0 == NULL || awssecretkey0 == NULL) {
/* default, non-authenticating, "anonymous" fapl configuration */
fa.authenticate = (hbool_t)0;
} else {
fa.authenticate = (hbool_t)1;
strlcat(fa.aws_region,region,H5FD_ROS3_MAX_REGION_LEN);
strlcat(fa.secret_id, h5->http.auth->s3creds.accessid, H5FD_ROS3_MAX_SECRET_ID_LEN);
strlcat(fa.secret_key, h5->http.auth->s3creds.secretkey, H5FD_ROS3_MAX_SECRET_KEY_LEN);
if(awsregion0)
strlcat(fa.aws_region,awsregion0,H5FD_ROS3_MAX_REGION_LEN);
strlcat(fa.secret_id, awsaccessid0, H5FD_ROS3_MAX_SECRET_ID_LEN);
strlcat(fa.secret_key, awssecretkey0, H5FD_ROS3_MAX_SECRET_KEY_LEN);
}
nullfree(region);
nullfree(hostport);
ncurifree(uri); uri = NULL;
/* create and set fapl entry */
if(H5Pset_fapl_ros3(fapl_id, &fa) < 0)
BAIL(NC_EHDFERR);
@ -2786,56 +2791,6 @@ exit:
return retval;
}

#ifdef ENABLE_BYTERANGE
#ifdef ENABLE_HDF5_ROS3
static int
ros3info(NCauth** authp, NCURI* uri, char** hostportp, char** regionp)
{
int stat = NC_NOERR;
size_t len;
char* hostport = NULL;
char* region = NULL;
char* p;

if(uri == NULL || uri->host == NULL)
{stat = NC_EINVAL; goto done;}
len = strlen(uri->host);
if(uri->port != NULL)
len += 1+strlen(uri->port);
len++; /* nul term */
if((hostport = malloc(len)) == NULL)
{stat = NC_ENOMEM; goto done;}
hostport[0] = '\0';
strlcat(hostport,uri->host,len);
if(uri->port != NULL) {
strlcat(hostport,":",len);
strlcat(hostport,uri->port,len);
}
/* We only support path urls, not virtual urls, so the
host past the first dot must be "s3.amazonaws.com" */
p = strchr(uri->host,'.');
if(p != NULL && strcmp(p+1,"s3.amazonaws.com")==0) {
len = (size_t)((p - uri->host)-1);
region = calloc(1,len+1);
memcpy(region,uri->host,len);
region[len] = '\0';
} else /* cannot find region: use "" */
region = strdup("");
if(hostportp) {*hostportp = hostport; hostport = NULL;}
if(regionp) {*regionp = region; region = NULL;}

/* Extract auth related info */
if((stat=NC_authsetup(authp, uri)))
goto done;

done:
nullfree(hostport);
nullfree(region);
return stat;
}
#endif /*ENABLE_HDF5_ROS3*/
#endif /*ENABLE_BYTERANGE*/

/**
* Wrapper function for H5Fopen.
* Converts the filename from ANSI to UTF-8 as needed before calling H5Fopen.
@ -17,6 +17,7 @@
#include <aws/s3/model/HeadObjectRequest.h>
#include <aws/s3/model/CreateBucketRequest.h>
#include <aws/s3/model/DeleteBucketRequest.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/logging/DefaultLogSystem.h>
#include <aws/core/utils/logging/AWSLogging.h>
@ -12,7 +12,16 @@
#ifndef ZARR_H
#define ZARR_H

typedef struct ZS3INFO {
char* host; /* non-null if other*/
char* region; /* region */
char* bucket; /* bucket name */
char* rootkey;
char* profile;
} ZS3INFO;

struct ChunkKey;
struct S3credentials;

/* zarr.c */
extern int ncz_create_dataset(NC_FILE_INFO_T*, NC_GRP_INFO_T*, const char** controls);
@ -68,7 +77,7 @@ extern int NCZ_swapatomicdata(size_t datalen, void* data, int typesize);
extern char** NCZ_clonestringvec(size_t len, const char** vec);
extern void NCZ_freestringvec(size_t len, char** vec);
extern int NCZ_create_fill_chunk(size64_t chunksize, size_t typesize, const void* fill, void** fillchunkp);
extern int NCZ_s3clear(ZS3INFO* s3);
extern int NCZ_s3clear(ZS3INFO* s3map);
extern int NCZ_ischunkname(const char* name,char dimsep);
extern char* NCZ_chunkpath(struct ChunkKey key);

@ -8,8 +8,8 @@
#undef ZDEBUG /* general debug */
#undef ZDEBUG1 /* detailed debug */

#undef ZCATCH /* Warning: significant performance impact */
#undef ZTRACING /* Warning: significant performance impact */
#define ZCATCH /* Warning: significant performance impact */
#define ZTRACING /* Warning: significant performance impact */

#include "ncexternl.h"
#include "nclog.h"
@ -124,10 +124,9 @@ int
NCZ_initialize(void)
{
int stat;
NCZ_dispatch_table = &NCZ_dispatcher;
#ifdef ZTRACING
NCZ_dispatch_table = &NCZ_dispatcher_trace;
#else
NCZ_dispatch_table = &NCZ_dispatcher;
#endif
if (!ncz_initialized)
NCZ_initialize_internal();
@ -53,7 +53,7 @@
#include "netcdf_aux.h"

#undef DEBUG
#define DEBUGF
#undef DEBUGF
#undef DEBUGL

#define NULLIFY(x) ((x)?(x):"NULL")
@ -172,8 +172,8 @@ const char*
printplugin(const NCZ_Plugin* plugin)
{
static char plbuf[4096];
char plbuf2[4096];
char plbuf1[4096];
char plbuf2[2000];
char plbuf1[2000];

if(plugin == NULL) return "plugin=NULL";
plbuf2[0] = '\0'; plbuf1[0] = '\0';
@ -338,7 +338,7 @@ NCZ_addfilter(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, unsigned int id, size_t
/* Before anything else, find the matching plugin */
if((stat = NCZ_plugin_loaded(id,&plugin))) goto done;
if(plugin == NULL || plugin->codec.codec == NULL) { /* fail */
ZLOG(NCLOGERR,"no such plugin: %u",(unsigned)id);
ZLOG(NCLOGWARN,"no such plugin: %u",(unsigned)id);
stat = NC_ENOFILTER;
goto done;
}
@ -672,7 +672,7 @@ NCZ_inq_var_filter_info(int ncid, int varid, unsigned int id, size_t* nparamsp,
if(params && spec->hdf5.visible.nparams > 0)
memcpy(params,spec->hdf5.visible.params,sizeof(unsigned int)*spec->hdf5.visible.nparams);
} else {
ZLOG(NCLOGERR,"no such filter: %u",(unsigned)id);
ZLOG(NCLOGWARN,"no such filter: %u",(unsigned)id);
stat = NC_ENOFILTER;
}
done:
@ -64,13 +64,11 @@ NCZ_initialize_internal(void)
NCRCglobalstate* ngs = NULL;

ncz_initialized = 1;
/* Load the .rc file */
if((stat=NC_rcload())) goto done;
ngs = ncrc_getglobalstate();
if(ngs != NULL) {
/* Defaults */
ngs->zarr.dimension_separator = DFALT_DIM_SEPARATOR;
dimsep = NC_rclookup("ZARR.DIMENSION_SEPARATOR",NULL);
dimsep = NC_rclookup("ZARR.DIMENSION_SEPARATOR",NULL,NULL);
if(dimsep != NULL) {
/* Verify its value */
if(dimsep != NULL && strlen(dimsep) == 1 && islegaldimsep(dimsep[0]))
@ -78,7 +76,6 @@ NCZ_initialize_internal(void)
}
}

done:
return stat;
}

@ -92,6 +89,9 @@ NCZ_finalize_internal(void)
/* Reclaim global resources */
ncz_initialized = 0;
NCZ_filter_finalize();
#ifdef ENABLE_S3_SDK
NCZ_s3finalize();
#endif
return NC_NOERR;
}

@ -10,16 +10,6 @@
/**************************************************/
/* Import the current implementations */

extern NCZMAP_DS_API zmap_file;
#ifdef USE_HDF5
extern NCZMAP_DS_API zmap_nz4;
#endif
#ifdef ENABLE_NCZARR_ZIP
extern NCZMAP_DS_API zmap_zip;
#endif
#ifdef ENABLE_S3_SDK
extern NCZMAP_DS_API zmap_s3sdk;
#endif

/**************************************************/

@ -189,36 +179,7 @@ nczm_split(const char* path, NClist* segments)
int
nczm_split_delim(const char* path, char delim, NClist* segments)
{
int stat = NC_NOERR;
const char* p = NULL;
const char* q = NULL;
ptrdiff_t len = 0;
char* seg = NULL;

if(path == NULL || strlen(path)==0 || segments == NULL)
{stat = NC_EINVAL; goto done;}

p = path;
if(p[0] == delim) p++;
for(;*p;) {
q = strchr(p,delim);
if(q==NULL)
q = p + strlen(p); /* point to trailing nul */
len = (q - p);
if(len == 0)
{stat = NC_EURL; goto done;}
if((seg = malloc(len+1)) == NULL)
{stat = NC_ENOMEM; goto done;}
memcpy(seg,p,len);
seg[len] = '\0';
nclistpush(segments,seg);
seg = NULL; /* avoid mem errors */
if(*q) p = q+1; else p = q;
}

done:
nullfree(seg);
return THROW(stat);
return NC_split_delim(path,delim,segments);
}

/* concat the the segments with each segment preceded by '/' */
@ -187,18 +187,6 @@ typedef struct NCZMAP {

/* zmap_s3sdk related-types and constants */

#define AWSHOST ".amazonaws.com"

enum URLFORMAT {UF_NONE=0, UF_VIRTUAL=1, UF_PATH=2, UF_OTHER=3};

typedef struct ZS3INFO {
enum URLFORMAT urlformat;
char* host; /* non-null if other*/
char* region; /* region */
char* bucket; /* bucket name */
char* rootkey;
} ZS3INFO;

/* Forward */
struct NClist;

@ -224,6 +212,17 @@ typedef struct NCZMAP_DS_API {
int (*open)(const char *path, int mode, size64_t constraints, void* parameters, NCZMAP** mapp);
} NCZMAP_DS_API;

extern NCZMAP_DS_API zmap_file;
#ifdef USE_HDF5
extern NCZMAP_DS_API zmap_nz4;
#endif
#ifdef ENABLE_NCZARR_ZIP
extern NCZMAP_DS_API zmap_zip;
#endif
#ifdef ENABLE_S3_SDK
extern NCZMAP_DS_API zmap_s3sdk;
#endif

#ifdef __cplusplus
extern "C" {
#endif
@ -312,6 +311,10 @@ EXTERNL int nczmap_close(NCZMAP* map, int deleteit);
EXTERNL int nczmap_create(NCZM_IMPL impl, const char *path, int mode, size64_t constraints, void* parameters, NCZMAP** mapp);
EXTERNL int nczmap_open(NCZM_IMPL impl, const char *path, int mode, size64_t constraints, void* parameters, NCZMAP** mapp);

#ifdef ENABLE_S3_SDK
EXTERNL void NCZ_s3finalize(void);
#endif

/* Utility functions */

/** Split a path into pieces along '/' character; elide any leading '/' */
|
@ -145,7 +145,7 @@ static int verify(const char* path, int isdir);
|
||||
#endif
|
||||
|
||||
static int zfinitialized = 0;
|
||||
static void zfinitialize(void)
|
||||
static void zfileinitialize(void)
|
||||
{
|
||||
if(!zfinitialized) {
|
||||
ZTRACE(5,NULL);
|
||||
@ -186,7 +186,7 @@ zfilecreate(const char *path, int mode, size64_t flags, void* parameters, NCZMAP
|
||||
NC_UNUSED(parameters);
|
||||
ZTRACE(5,"path=%s mode=%d flag=%llu",path,mode,flags);
|
||||
|
||||
if(!zfinitialized) zfinitialize();
|
||||
if(!zfinitialized) zfileinitialize();
|
||||
|
||||
/* Fixup mode flags */
|
||||
mode |= (NC_NETCDF4 | NC_WRITE);
|
||||
@ -256,7 +256,7 @@ zfileopen(const char *path, int mode, size64_t flags, void* parameters, NCZMAP**
|
||||
NC_UNUSED(parameters);
|
||||
ZTRACE(5,"path=%s mode=%d flags=%llu",path,mode,flags);
|
||||
|
||||
if(!zfinitialized) zfinitialize();
|
||||
if(!zfinitialized) zfileinitialize();
|
||||
|
||||
/* Fixup mode flags */
|
||||
mode = (NC_NETCDF4 | mode);
|
||||
|
@ -44,7 +44,6 @@ Notes:
|
||||
typedef struct ZS3MAP {
|
||||
NCZMAP map;
|
||||
ZS3INFO s3;
|
||||
void* s3config;
|
||||
void* s3client;
|
||||
char* errmsg;
|
||||
} ZS3MAP;
|
||||
@ -100,15 +99,13 @@ zs3initialize(void)
|
||||
}
|
||||
}
|
||||
|
||||
#if 0
|
||||
static void
|
||||
zs3finalize(void)
|
||||
void
|
||||
NCZ_s3finalize(void)
|
||||
{
|
||||
if(zs3initialized)
|
||||
NCZ_s3sdkfinalize();
|
||||
zs3initialized = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int
|
||||
zs3create(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** mapp)
|
||||
@ -126,7 +123,7 @@ zs3create(const char *path, int mode, size64_t flags, void* parameters, NCZMAP**
|
||||
|
||||
if(!zs3initialized) zs3initialize();
|
||||
|
||||
/* Build the z4 state */
|
||||
/* Build the z3 state */
|
||||
if((z3map = (ZS3MAP*)calloc(1,sizeof(ZS3MAP))) == NULL)
|
||||
{stat = NC_ENOMEM; goto done;}
|
||||
|
||||
@ -147,8 +144,7 @@ zs3create(const char *path, int mode, size64_t flags, void* parameters, NCZMAP**
|
||||
if(z3map->s3.rootkey == NULL)
|
||||
{stat = NC_EURL; goto done;}
|
||||
|
||||
if((stat=NCZ_s3sdkcreateconfig(z3map->s3.host, z3map->s3.region, &z3map->s3config))) goto done;
|
||||
if((stat = NCZ_s3sdkcreateclient(z3map->s3config,&z3map->s3client))) goto done;
|
||||
z3map->s3client = NCZ_s3sdkcreateclient(&z3map->s3);
|
||||
|
||||
{
|
||||
int exists = 0;
|
||||
@ -205,7 +201,7 @@ zs3open(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** m
|
||||
|
||||
if(!zs3initialized) zs3initialize();
|
||||
|
||||
/* Build the z4 state */
|
||||
/* Build the z3 state */
|
||||
if((z3map = (ZS3MAP*)calloc(1,sizeof(ZS3MAP))) == NULL)
|
||||
{stat = NC_ENOMEM; goto done;}
|
||||
|
||||
@ -226,8 +222,7 @@ zs3open(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** m
|
||||
if(z3map->s3.rootkey == NULL)
|
||||
{stat = NC_EURL; goto done;}
|
||||
|
||||
if((stat=NCZ_s3sdkcreateconfig(z3map->s3.host,z3map->s3.region,&z3map->s3config))) goto done;
|
||||
if((stat=NCZ_s3sdkcreateclient(z3map->s3config,&z3map->s3client))) goto done;
|
||||
z3map->s3client = NCZ_s3sdkcreateclient(&z3map->s3);
|
||||
|
||||
/* Search the root for content */
|
||||
content = nclistnew();
|
||||
@ -405,17 +400,13 @@ zs3close(NCZMAP* map, int deleteit)
|
||||
|
||||
if(deleteit)
|
||||
s3clear(z3map,z3map->s3.rootkey);
|
||||
if(z3map->s3client && z3map->s3config && z3map->s3.bucket && z3map->s3.rootkey) {
|
||||
NCZ_s3sdkclose(z3map->s3client, z3map->s3config, z3map->s3.bucket, z3map->s3.rootkey, deleteit, &z3map->errmsg);
|
||||
if(z3map->s3client && z3map->s3.bucket && z3map->s3.rootkey) {
|
||||
NCZ_s3sdkclose(z3map->s3client, &z3map->s3, deleteit, &z3map->errmsg);
|
||||
}
|
||||
reporterr(z3map);
|
||||
z3map->s3client = NULL;
|
||||
z3map->s3config = NULL;
|
||||
nullfree(z3map->s3.bucket);
|
||||
nullfree(z3map->s3.region);
|
||||
nullfree(z3map->s3.host);
|
||||
NCZ_s3clear(&z3map->s3);
|
||||
nullfree(z3map->errmsg);
|
||||
nullfree(z3map->s3.rootkey)
|
||||
nczm_clear(map);
|
||||
nullfree(map);
|
||||
return ZUNTRACE(stat);
|
||||
@ -507,7 +498,6 @@ s3clear(ZS3MAP* z3map, const char* rootkey)
|
||||
char** list = NULL;
|
||||
char** p;
|
||||
size_t nkeys = 0;
|
||||
|
||||
if(z3map->s3client && z3map->s3.bucket && rootkey) {
|
||||
if((stat = NCZ_s3sdksearch(z3map->s3client, z3map->s3.bucket, rootkey, &nkeys, &list, &z3map->errmsg)))
|
||||
goto done;
|
||||
|
@ -77,7 +77,7 @@ static void freesearchcache(char** cache);
static int zzinitialized = 0;

static void
zzinitialize(void)
zipinitialize(void)
{
if(!zzinitialized) {
ZTRACE(7,NULL);
@ -110,7 +110,7 @@ zipcreate(const char *path, int mode, size64_t flags, void* parameters, NCZMAP**
NC_UNUSED(parameters);
ZTRACE(6,"path=%s mode=%d flag=%llu",path,mode,flags);

if(!zzinitialized) zzinitialize();
if(!zzinitialized) zipinitialize();

/* Fixup mode flags */
mode = (NC_NETCDF4 | NC_WRITE | mode);
@ -186,7 +186,7 @@ zipopen(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** m
NC_UNUSED(parameters);
ZTRACE(6,"path=%s mode=%d flags=%llu",path,mode,flags);

if(!zzinitialized) zzinitialize();
if(!zzinitialized) zipinitialize();

/* Fixup mode flags */
mode = (NC_NETCDF4 | mode);
||||
|
@ -32,11 +32,23 @@ static int makes3key(const char* pathkey, const char** keyp);
|
||||
static int makes3keydir(const char* prefix, char** prefixdirp);
|
||||
static char** mergekeysets(size_t nkeys1, char** keys1, size_t nkeys2, char** keys2);
|
||||
|
||||
static const char*
|
||||
dumps3info(ZS3INFO* info)
|
||||
{
|
||||
static char text[8192];
|
||||
snprintf(text,sizeof(text),"host=%s region=%s bucket=%s rootkey=%s profile=%s",
|
||||
(info->host?info->host:"null"),
|
||||
(info->region?info->region:"null"),
|
||||
(info->bucket?info->bucket:"null"),
|
||||
(info->rootkey?info->rootkey:"null"),
|
||||
(info->profile?info->profile:"null"));
|
||||
return text;
|
||||
}
|
||||
|
||||
void
|
||||
NCZ_s3sdkinitialize(void)
|
||||
{
|
||||
ZTRACE(11,NULL);
|
||||
// zs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Error;
|
||||
Aws::InitAPI(zs3options);
|
||||
ZUNTRACE(NC_NOERR);
|
||||
}
|
||||
@ -65,39 +77,51 @@ makeerrmsg(const Aws::Client::AWSError<Aws::S3::S3Errors> err, const char* key="
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
NCZ_s3sdkcreateconfig(const char* host, const char* region, void** configp)
|
||||
static Aws::Client::ClientConfiguration*
|
||||
s3sdkcreateconfig(ZS3INFO* info)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
ZTRACE(11,"host=%s region=%s");
|
||||
ZTRACE(11,"info=%s", dumps3info(info));
|
||||
|
||||
Aws::Client::ClientConfiguration *config = new Aws::Client::ClientConfiguration();
|
||||
Aws::Client::ClientConfiguration *config;
|
||||
if(info->profile)
|
||||
config = new Aws::Client::ClientConfiguration(info->profile);
|
||||
else
|
||||
config = new Aws::Client::ClientConfiguration();
|
||||
config->scheme = Aws::Http::Scheme::HTTPS;
|
||||
config->connectTimeoutMs = 300000;
|
||||
config->requestTimeoutMs = 600000;
|
||||
if(region) config->region = region;
|
||||
if(host) config->endpointOverride = host;
|
||||
if(info->region) config->region = info->region;
|
||||
if(info->host) config->endpointOverride = info->host;
|
||||
config->enableEndpointDiscovery = true;
|
||||
#if 0
|
||||
config->followRedirects = Aws::Client::FollowRedirectsPolicy::ALWAYS;
|
||||
#endif
|
||||
if(configp) * configp = config;
|
||||
return ZUNTRACE(stat);
|
||||
ZUNTRACE(NC_NOERR);
|
||||
return config;
|
||||
}
|
||||
|
||||
int
|
||||
NCZ_s3sdkcreateclient(void* config0, void** clientp)
|
||||
void*
|
||||
NCZ_s3sdkcreateclient(ZS3INFO* info)
|
||||
{
|
||||
ZTRACE(11,NULL);
|
||||
Aws::Client::ClientConfiguration* config = (Aws::Client::ClientConfiguration*) config0;
|
||||
Aws::S3::S3Client *s3client
|
||||
= new Aws::S3::S3Client(*config,
|
||||
|
||||
Aws::Client::ClientConfiguration* config = s3sdkcreateconfig(info);
|
||||
Aws::S3::S3Client *s3client;
|
||||
|
||||
if(info->profile == NULL || strcmp(info->profile,"none")==0) {
|
||||
Aws::Auth::AWSCredentials creds;
|
||||
creds.SetAWSAccessKeyId(Aws::String(""));
|
||||
creds.SetAWSSecretKey(Aws::String(""));
|
||||
s3client = new Aws::S3::S3Client(creds,*config,
|
||||
Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent,
|
||||
false,
|
||||
Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET
|
||||
);
|
||||
if(clientp) *clientp = (void*)s3client;
|
||||
return ZUNTRACE(NC_NOERR);
|
||||
false);
|
||||
} else {
|
||||
s3client = new Aws::S3::S3Client(*config,
|
||||
Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent,
|
||||
false);
|
||||
}
|
||||
delete config;
|
||||
ZUNTRACE(NC_NOERR);
|
||||
return (void*)s3client;
|
||||
}
|
||||
|
||||
int
|
||||
@ -106,6 +130,7 @@ NCZ_s3sdkbucketexists(void* s3client0, const char* bucket, int* existsp, char**
|
||||
int stat = NC_NOERR;
|
||||
int exists = 0;
|
||||
Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0;
|
||||
|
||||
ZTRACE(11,"bucket=%s",bucket);
|
||||
if(errmsgp) *errmsgp = NULL;
|
||||
auto result = s3client->ListBuckets();
|
||||
@ -158,25 +183,22 @@ NCZ_s3sdkbucketcreate(void* s3client0, const char* region, const char* bucket, c
|
||||
}
|
||||
|
||||
int
|
||||
NCZ_s3sdkbucketdelete(void* s3client0, void* config0, const char* region, const char* bucket, char** errmsgp)
|
||||
NCZ_s3sdkbucketdelete(void* s3client0, ZS3INFO* info, char** errmsgp)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
|
||||
ZTRACE(11,"region=%s bucket=%s",region,bucket);
|
||||
ZTRACE(11,"info=%s%s",dumps3info(info));
|
||||
|
||||
Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0;
|
||||
Aws::Client::ClientConfiguration *config = (Aws::Client::ClientConfiguration*)config0;
|
||||
|
||||
|
||||
if(errmsgp) *errmsgp = NULL;
|
||||
const Aws::S3::Model::BucketLocationConstraint &awsregion = s3findregion(region);
|
||||
const Aws::S3::Model::BucketLocationConstraint &awsregion = s3findregion(info->region);
|
||||
if(awsregion == Aws::S3::Model::BucketLocationConstraint::NOT_SET)
|
||||
return NC_EURL;
|
||||
/* Set up the request */
|
||||
Aws::S3::Model::DeleteBucketRequest request;
|
||||
request.SetBucket(bucket);
|
||||
if(region) {
|
||||
config->region = region; // Will this work?
|
||||
}
|
||||
request.SetBucket(info->bucket);
|
||||
|
||||
#ifdef NOOP
|
||||
/* Delete the bucket */
|
||||
auto result = s3client->DeleteBucket(request);
|
||||
@ -319,25 +341,22 @@ NCZ_s3sdkwriteobject(void* s3client0, const char* bucket, const char* pathkey,
|
||||
}
|
||||
|
||||
int
|
||||
NCZ_s3sdkclose(void* s3client0, void* config0, const char* bucket, const char* rootkey, int deleteit, char** errmsgp)
|
||||
NCZ_s3sdkclose(void* s3client0, ZS3INFO* info, int deleteit, char** errmsgp)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
|
||||
ZTRACE(11,"bucket=%s rootkey=%s deleteit=%d content=%p",bucket,rootkey,deleteit);
|
||||
ZTRACE(11,"info=%s rootkey=%s deleteit=%d",dumps3info(info),deleteit);
|
||||
|
||||
Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0;
|
||||
Aws::Client::ClientConfiguration *config = (Aws::Client::ClientConfiguration*)config0;
|
||||
if(deleteit) {
|
||||
/* Delete the root key; ok it if does not exist */
|
||||
switch (stat = NCZ_s3sdkdeletekey(s3client0,bucket,rootkey,errmsgp)) {
|
||||
switch (stat = NCZ_s3sdkdeletekey(s3client0,info->bucket,info->rootkey,errmsgp)) {
|
||||
case NC_NOERR: break;
|
||||
case NC_EEMPTY: case NC_ENOTFOUND: stat = NC_NOERR; break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
delete s3client;
|
||||
delete config;
|
||||
|
||||
return ZUNTRACE(stat);
|
||||
}
|
||||
|
||||
|
@ -6,21 +6,21 @@
|
||||
#ifndef ZS3SDK_H
|
||||
#define ZS3SDK_H 1
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
EXTERNL void NCZ_s3sdkinitialize(void);
|
||||
EXTERNL void NCZ_s3sdkfinalize(void);
|
||||
EXTERNL int NCZ_s3sdkcreateconfig(const char* host, const char* reqion, void** configp);
|
||||
EXTERNL int NCZ_s3sdkcreateclient(void* config, void** clientp);
|
||||
EXTERNL void* NCZ_s3sdkcreateclient(ZS3INFO* context);
|
||||
EXTERNL int NCZ_s3sdkbucketexists(void* s3client, const char* bucket, int* existsp, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdkbucketcreate(void* s3client, const char* region, const char* bucket, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdkbucketdelete(void* s3client, const char* region, const char* bucket, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdkinfo(void* client0, const char* bucket, const char* pathkey, unsigned long long* lenp, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdkread(void* client0, const char* bucket, const char* pathkey, unsigned long long start, unsigned long long count, void* content, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdkwriteobject(void* client0, const char* bucket, const char* pathkey, unsigned long long count, const void* content, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdkclose(void* s3client0, void* config0, const char* bucket, const char* rootkey, int deleteit, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdkclose(void* s3client0, ZS3INFO* info, int deleteit, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdkgetkeys(void* s3client0, const char* bucket, const char* prefix, size_t* nkeysp, char*** keysp, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdksearch(void* s3client0, const char* bucket, const char* prefixkey0, size_t* nkeysp, char*** keysp, char** errmsgp);
|
||||
EXTERNL int NCZ_s3sdkdeletekey(void* client0, const char* bucket, const char* pathkey, char** errmsgp);
|
||||
|
@ -67,7 +67,6 @@ NCJ_INT, /*NC_UINT64*/
|
||||
};
|
||||
|
||||
/* Forward */
|
||||
static int endswith(const char* s, const char* suffix);
|
||||
|
||||
/**************************************************/
|
||||
|
||||
@ -782,88 +781,32 @@ EXTERNL int
|
||||
NCZ_s3urlprocess(NCURI* url, ZS3INFO* s3)
|
||||
{
|
||||
int stat = NC_NOERR;
|
||||
NClist* segments = NULL;
|
||||
NCbytes* buf = ncbytesnew();
|
||||
NCURI* url2 = NULL;
|
||||
NClist* pathsegments = NULL;
|
||||
const char* profile0 = NULL;
|
||||
|
||||
if(url == NULL)
|
||||
{stat = NC_EURL; goto done;}
|
||||
/* do some verification */
|
||||
if(strcmp(url->protocol,"https") != 0)
|
||||
if(url == NULL || s3 == NULL)
|
||||
{stat = NC_EURL; goto done;}
|
||||
/* Get current profile */
|
||||
if((stat = NC_getactives3profile(url,&profile0))) goto done;
|
||||
if(profile0 == NULL) profile0 = "none";
|
||||
s3->profile = strdup(profile0);
|
||||
|
||||
/* Path better look absolute */
|
||||
if(!nczm_isabsolutepath(url->path))
|
||||
{stat = NC_EURL; goto done;}
|
||||
|
||||
/* Distinguish path-style from virtual-host style from other:
|
||||
Virtual: https://bucket-name.s3.Region.amazonaws.com/<root>
|
||||
Path: https://s3.Region.amazonaws.com/bucket-name/<root>
|
||||
Other: https://<host>/bucketname/<root>
|
||||
*/
|
||||
if(url->host == NULL || strlen(url->host) == 0)
|
||||
{stat = NC_EURL; goto done;}
|
||||
if(endswith(url->host,AWSHOST)) { /* Virtual or path */
|
||||
segments = nclistnew();
|
||||
/* split the hostname by "." */
|
||||
if((stat = nczm_split_delim(url->host,'.',segments))) goto done;
|
||||
switch (nclistlength(segments)) {
|
||||
default: stat = NC_EURL; goto done;
|
||||
case 4:
|
||||
if(strcasecmp(nclistget(segments,0),"s3")!=0)
|
||||
{stat = NC_EURL; goto done;}
|
||||
s3->urlformat = UF_PATH;
|
||||
s3->region = strdup(nclistget(segments,1));
|
||||
break;
|
||||
case 5:
|
||||
if(strcasecmp(nclistget(segments,1),"s3")!=0)
|
||||
{stat = NC_EURL; goto done;}
|
||||
s3->urlformat = UF_VIRTUAL;
|
||||
s3->region = strdup(nclistget(segments,2));
|
||||
s3->bucket = strdup(nclistget(segments,0));
|
||||
break;
|
||||
}
|
||||
/* Rebuild host to look like path-style */
|
||||
ncbytescat(buf,"s3.");
|
||||
ncbytescat(buf,s3->region);
|
||||
ncbytescat(buf,AWSHOST);
|
||||
s3->host = ncbytesextract(buf);
|
||||
} else {
|
||||
s3->urlformat = UF_OTHER;
|
||||
if((s3->host = strdup(url->host))==NULL)
|
||||
{stat = NC_ENOMEM; goto done;}
|
||||
/* Rebuild the URL to path format and get a usable region*/
|
||||
if((stat = NC_s3urlrebuild(url,&url2,&s3->bucket,&s3->region))) goto done;
|
||||
s3->host = strdup(url2->host);
|
||||
/* construct the rootkey minus the leading bucket */
|
||||
pathsegments = nclistnew();
|
||||
if((stat = NC_split_delim(url2->path,'/',pathsegments))) goto done;
|
||||
if(nclistlength(pathsegments) > 0) {
|
||||
char* seg = nclistremove(pathsegments,0);
|
||||
nullfree(seg);
|
||||
}
|
||||
/* Do fixups to make everything look like it was path style */
|
||||
switch (s3->urlformat) {
|
||||
case UF_PATH:
|
||||
case UF_OTHER:
|
||||
/* We have to process the path to get the bucket, and remove it in the path */
|
||||
if(url->path != NULL && strlen(url->path) > 0) {
|
||||
/* split the path by "/" */
|
||||
nclistfreeall(segments);
|
||||
segments = nclistnew();
|
||||
if((stat = nczm_split_delim(url->path,'/',segments))) goto done;
|
||||
if(nclistlength(segments) == 0)
|
||||
{stat = NC_EURL; goto done;}
|
||||
s3->bucket = ((char*)nclistremove(segments,0));
|
||||
if(nclistlength(segments) > 0) {
|
||||
if((stat = nczm_join(segments,&s3->rootkey))) goto done;
|
||||
} else
|
||||
s3->rootkey = NULL;
|
||||
nclistfreeall(segments); segments = NULL;
|
||||
}
|
||||
break;
|
||||
case UF_VIRTUAL:
|
||||
if(url->path == NULL || strlen(url->path) == 0)
|
||||
s3->rootkey = NULL;
|
||||
else
|
||||
s3->rootkey = strdup(url->path);
|
||||
break;
|
||||
default: stat = NC_EURL; goto done;
|
||||
}
|
||||
|
||||
if((stat = nczm_join(pathsegments,&s3->rootkey))) goto done;
|
||||
|
||||
done:
|
||||
ncbytesfree(buf);
|
||||
nclistfreeall(segments);
|
||||
ncurifree(url2);
|
||||
nclistfreeall(pathsegments);
|
||||
return stat;
|
||||
}
|
||||
|
||||
@ -871,27 +814,15 @@ int
|
||||
NCZ_s3clear(ZS3INFO* s3)
|
||||
{
|
||||
if(s3) {
|
||||
nullfree(s3->host);
|
||||
nullfree(s3->region);
|
||||
nullfree(s3->bucket);
|
||||
nullfree(s3->rootkey);
|
||||
nullfree(s3->host); s3->host = NULL;
|
||||
nullfree(s3->region); s3->region = NULL;
|
||||
nullfree(s3->bucket); s3->bucket = NULL;
|
||||
nullfree(s3->rootkey); s3->rootkey = NULL;
|
||||
nullfree(s3->profile); s3->profile = NULL;
|
||||
}
|
||||
return NC_NOERR;
|
||||
}
|
||||
|
||||
static int
|
||||
endswith(const char* s, const char* suffix)
|
||||
{
|
||||
ssize_t ls, lsf, delta;
|
||||
if(s == NULL || suffix == NULL) return 0;
|
||||
ls = strlen(s);
|
||||
lsf = strlen(suffix);
|
||||
delta = (ls - lsf);
|
||||
if(delta < 0) return 0;
|
||||
if(memcmp(s+delta,suffix,lsf)!=0) return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
int
|
||||
NCZ_ischunkname(const char* name,char dimsep)
|
||||
{
|
||||
|
@ -167,7 +167,8 @@ httpio_open(const char* path,
|
||||
/* Create private data */
|
||||
if((status = httpio_new(path, ioflags, &nciop, &http))) goto done;
|
||||
/* Open the path and get curl handle and object size */
|
||||
if((status = nc_http_open(path,&http->state,&http->size))) goto done;
|
||||
if((status = nc_http_init(&http->state))) goto done;
|
||||
if((status = nc_http_size(http->state,path,&http->size))) goto done;
|
||||
|
||||
sizehint = pagesize;
|
||||
|
||||
|
@ -9,13 +9,14 @@ set -e
|
||||
#BIGTEST=1
|
||||
|
||||
# Test Urls (S3 URLS must be in path format)
|
||||
URL3="https://thredds-test.unidata.ucar.edu/thredds/fileServer/pointData/cf_dsg/example/point.nc#mode=bytes"
|
||||
#URL3="https://remotetest.unidata.ucar.edu/thredds/fileServer/testdata/2004050300_eta_211.nc#bytes"
|
||||
URL4a="https://s3.us-east-1.amazonaws.com/noaa-goes16/ABI-L1b-RadC/2017/059/03/OR_ABI-L1b-RadC-M3C13_G16_s20170590337505_e20170590340289_c20170590340316.nc#mode=bytes"
|
||||
URL4b="https://thredds-test.unidata.ucar.edu/thredds/fileServer/irma/metar/files/METAR_20170910_0000.nc#bytes"
|
||||
URL3="https://thredds-test.unidata.ucar.edu/thredds/fileServer/pointData/cf_dsg/example/point.nc#mode=bytes&aws.profile=none"
|
||||
#URL3="https://remotetest.unidata.ucar.edu/thredds/fileServer/testdata/2004050300_eta_211.nc#bytes&aws.profile=none"
|
||||
URL4a="https://s3.us-east-1.amazonaws.com/noaa-goes16/ABI-L1b-RadC/2017/059/03/OR_ABI-L1b-RadC-M3C13_G16_s20170590337505_e20170590340289_c20170590340316.nc#mode=bytes&aws.profile=none"
|
||||
URL4b="https://thredds-test.unidata.ucar.edu/thredds/fileServer/irma/metar/files/METAR_20170910_0000.nc#bytes&aws.profile=none"
|
||||
URL4c="s3://noaa-goes16/ABI-L1b-RadC/2017/059/03/OR_ABI-L1b-RadC-M3C13_G16_s20170590337505_e20170590340289_c20170590340316.nc#mode=bytes&aws.profile=none"
|
||||
# Do not use unless we know it has some permanence (note the segment 'testing' in the URL);
|
||||
if test "x$BIGTEST" = x1 ; then
|
||||
URL4c="https://s3.us-west-2.amazonaws.com/coawst-public/testing/HadCRUT.4.6.0.0.median.nc#mode=bytes"
|
||||
URL4c="https://s3.us-west-2.amazonaws.com/coawst-public/testing/HadCRUT.4.6.0.0.median.nc#mode=bytes,&aws.profile=none"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
|
@ -7,10 +7,10 @@
|
||||
int
|
||||
main(int argc, char** argv)
|
||||
{
|
||||
size_t i,ntriples = 0;
|
||||
size_t i,nentries = 0;
|
||||
NCRCglobalstate* ngs = ncrc_getglobalstate();
|
||||
NCRCinfo* info = NULL;
|
||||
NCTriple* triple = NULL;
|
||||
NCRCentry* entry = NULL;
|
||||
|
||||
/* Cause the .rc files to be read and merged */
|
||||
nc_initialize();
|
||||
@ -23,17 +23,21 @@ main(int argc, char** argv)
|
||||
exit(0);
|
||||
}
|
||||
|
||||
/* Print out the .rc triples */
|
||||
if((ntriples = NC_rcfile_length(info))==0) {
|
||||
/* Print out the .rc entries */
|
||||
if((nentries = NC_rcfile_length(info))==0) {
|
||||
printf("<empty>\n");
|
||||
exit(0);
|
||||
}
|
||||
for(i=0;i<ntriples;i++) {
|
||||
triple = NC_rcfile_ith(info,i);
|
||||
if(triple == NULL) abort();
|
||||
if(triple->host != NULL)
|
||||
printf("[%s] ",triple->host);
|
||||
printf("|%s|->|%s|\n",triple->key,triple->value);
|
||||
for(i=0;i<nentries;i++) {
|
||||
entry = NC_rcfile_ith(info,i);
|
||||
if(entry == NULL) abort();
|
||||
if(entry->host != NULL) {
|
||||
printf("[%s ",entry->host);
|
||||
if(entry->path != NULL)
|
||||
printf("/%s] ",entry->path);
|
||||
printf("]");
|
||||
}
|
||||
printf("|%s|->|%s|\n",entry->key,entry->value);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -10,14 +10,14 @@ set -e
|
||||
testset() {
|
||||
# Which test cases to exercise
|
||||
testapi $1
|
||||
#testng $1
|
||||
#testncp $1
|
||||
#testunk $1
|
||||
#testngc $1
|
||||
#testmisc $1
|
||||
#testmulti $1
|
||||
#testrep $1
|
||||
#testorder $1
|
||||
testng $1
|
||||
testncp $1
|
||||
testunk $1
|
||||
testngc $1
|
||||
testmisc $1
|
||||
testmulti $1
|
||||
testrep $1
|
||||
testorder $1
|
||||
}
|
||||
|
||||
# Function to remove selected -s attributes from file;
|
||||
@ -65,8 +65,8 @@ if ! test -f ${MISCPATH} ; then echo "Unable to locate ${MISCPATH}"; exit 1; fi
|
||||
testapi() {
|
||||
zext=$1
|
||||
echo "*** Testing dynamic filters using API for map=$zext"
|
||||
deletemap $zext tmp_api
|
||||
fileargs tmp_api
|
||||
deletemap $zext $file
|
||||
${execdir}/testfilter $fileurl
|
||||
${NCDUMP} -s -n bzip2 $fileurl > ./tmp_api_$zext.txt
|
||||
# Remove irrelevant -s output
|
||||
@ -78,8 +78,8 @@ echo "*** Pass: API dynamic filter for map=$zext"
|
||||
testmisc() {
|
||||
zext=$1
|
||||
echo "*** Testing dynamic filters parameter passing for map $zext"
|
||||
deletemap $zext tmp_misc
|
||||
fileargs tmp_misc
|
||||
deletemap $zext $file
|
||||
${execdir}/testfilter_misc $fileurl
|
||||
# Verify the parameters via ncdump
|
||||
${NCDUMP} -s $fileurl > ./tmp_misc_$zext.txt
|
||||
@ -98,8 +98,8 @@ echo "*** Pass: parameter passing for map $zext"
|
||||
testng() {
|
||||
zext=$1
|
||||
echo "*** Testing dynamic filters using ncgen for map $zext"
|
||||
deletemap $zext tmp_misc
|
||||
fileargs tmp_misc
|
||||
deletemap $zext $file
|
||||
${NCGEN} -lb -4 -o $fileurl ${srcdir}/../nc_test4/bzip2.cdl
|
||||
${NCDUMP} -s -n bzip2 $fileurl > ./tmp_ng_$zext.txt
|
||||
# Remove irrelevant -s output
|
||||
@ -111,8 +111,8 @@ echo "*** Pass: ncgen dynamic filter for map $zext"
|
||||
testncp() {
|
||||
zext=$1
|
||||
echo "*** Testing dynamic filters using nccopy for map $zext"
|
||||
deletemap $zext tmp_misc
|
||||
fileargs tmp_unfiltered
|
||||
deletemap $zext $file
|
||||
# Create our input test files
|
||||
${NCGEN} -4 -lb -o $fileurl ${srcdir}/../nc_test4/ref_unfiltered.cdl
|
||||
fileurl0=$fileurl
|
||||
@ -128,8 +128,8 @@ echo " *** Pass: nccopy simple filter for map $zext"
|
||||
testunk() {
|
||||
zext=$1
|
||||
echo "*** Testing access to filter info when filter implementation is not available for map $zext"
|
||||
deletemap $zext tmp_known
|
||||
fileargs tmp_known
|
||||
deletemap $zext $file
|
||||
# build bzip2.nc
|
||||
${NCGEN} -lb -4 -o $fileurl ${srcdir}/../nc_test4/bzip2.cdl
|
||||
# dump and clean bzip2.nc header when filter is avail
|
||||
@ -151,8 +151,8 @@ echo "*** Pass: ncgen dynamic filter for map $zext"
|
||||
testngc() {
|
||||
zext=$1
|
||||
echo "*** Testing dynamic filters using ncgen with -lc for map $zext"
|
||||
deletemap $zext tmp_ngc
|
||||
fileargs tmp_ngc
|
||||
deletemap $zext $file
|
||||
${NCGEN} -lc -4 ${srcdir}/../nc_test4/bzip2.cdl > tmp_ngc.c
|
||||
diff -b -w ${srcdir}/../nc_test4/../nc_test4/ref_bzip2.c ./tmp_ngc.c
|
||||
echo "*** Pass: ncgen dynamic filter for map $zext"
|
||||
@ -161,8 +161,8 @@ echo "*** Pass: ncgen dynamic filter for map $zext"
|
||||
testmulti() {
|
||||
zext=$1
|
||||
echo "*** Testing multiple filters for map $zext"
|
||||
deletemap $zext tmp_multi
|
||||
fileargs tmp_multi
|
||||
deletemap $zext $file
|
||||
${execdir}/testfilter_multi $fileurl
|
||||
${NCDUMP} -hs -n multifilter $fileurl >./tmp_multi_$zext.cdl
|
||||
# Remove irrelevant -s output
|
||||
@ -174,8 +174,8 @@ echo "*** Pass: multiple filters for map $zext"
|
||||
testrep() {
|
||||
zext=$1
|
||||
echo "*** Testing filter re-definition invocation for map $zext"
|
||||
deletemap $zext tmp_rep
|
||||
fileargs tmp_rep
|
||||
deletemap $zext $file
|
||||
${execdir}/testfilter_repeat $fileurl >tmp_rep_$zext.txt
|
||||
diff -b -w ${srcdir}/../nc_test4/ref_filter_repeat.txt tmp_rep_$zext.txt
|
||||
}
|
||||
@ -183,8 +183,8 @@ diff -b -w ${srcdir}/../nc_test4/ref_filter_repeat.txt tmp_rep_$zext.txt
|
||||
testorder() {
|
||||
zext=$1
|
||||
echo "*** Testing multiple filter order of invocation on create for map $zext"
|
||||
deletemap $zext tmp_order
|
||||
fileargs tmp_order
|
||||
deletemap $zext $file
|
||||
${execdir}/testfilter_order create $fileurl >tmp_order_$zext.txt
|
||||
diff -b -w ${srcdir}/../nc_test4/ref_filter_order_create.txt tmp_order_$zext.txt
|
||||
echo "*** Testing multiple filter order of invocation on read for map $zext"
|
||||
|
@ -23,13 +23,13 @@ diff -b ${srcdir}/ref_purezarr.cdl tmp_purezarr_${zext}.cdl
|
||||
|
||||
echo "*** Test: xarray zarr write then read; format=$zext"
|
||||
fileargs tmp_xarray "mode=zarr,$zext"
|
||||
deletemap $zext $file
|
||||
#deletemap $zext $file
|
||||
${NCGEN} -4 -b -o "$fileurl" $srcdir/ref_purezarr_base.cdl
|
||||
${NCDUMP} $fileurl > tmp_xarray_${zext}.cdl
|
||||
diff -b ${srcdir}/ref_xarray.cdl tmp_xarray_${zext}.cdl
|
||||
|
||||
echo "*** Test: pure zarr reading nczarr; format=$zext"
|
||||
fileargs tmp_nczarr "mode=nczarr,$zext"
|
||||
|
||||
deletemap $zext $file
|
||||
${NCGEN} -4 -b -o "$fileurl" $srcdir/ref_whole.cdl
|
||||
fileargs tmp_nczarr "mode=zarr,$zext"
|
||||
|
@ -92,6 +92,7 @@ if test "x$FEATURE_NCZARR_ZIP" = xyes ; then
|
||||
fi
|
||||
if test "x$FEATURE_S3TESTS" = xyes ; then
|
||||
echo ""; echo "*** Test zmap_s3sdk"
|
||||
export PROFILE="-p stratus"
|
||||
testmapcreate s3; testmapmeta s3; testmapdata s3; testmapsearch s3
|
||||
fi
|
||||
}
|
||||
|
@ -61,11 +61,11 @@ struct Dumpptions {
char* rootkey; /* from url | key */
nc_type nctype; /* for printing content */
char* filename;
char* profile;
} dumpoptions;

struct S3SDK {
ZS3INFO s3;
void* s3config;
void* s3client;
char* errmsg;
} s3sdk;

@ -108,6 +108,7 @@ s3initialize(void)
static void
s3finalize(void)
{
NCZ_s3clear(&s3sdk.s3);
NCZ_s3sdkfinalize();
}

@ -115,9 +116,7 @@ static int
s3setup(void)
{
int stat = NC_NOERR;
if((stat=NCZ_s3sdkcreateconfig(s3sdk.s3.host, s3sdk.s3.region, &s3sdk.s3config))) goto done;
if((stat = NCZ_s3sdkcreateclient(s3sdk.s3config,&s3sdk.s3client))) goto done;
done:
s3sdk.s3client = NCZ_s3sdkcreateclient(&s3sdk.s3);
return stat;
}

@ -125,7 +124,7 @@ static int
s3shutdown(int deleteit)
{
int stat = NC_NOERR;
stat = NCZ_s3sdkclose(s3sdk.s3client, s3sdk.s3config, s3sdk.s3.bucket, s3sdk.s3.rootkey, deleteit, &s3sdk.errmsg);
stat = NCZ_s3sdkclose(s3sdk.s3client, &s3sdk.s3, deleteit, &s3sdk.errmsg);
return stat;
}

@ -140,7 +139,7 @@ main(int argc, char** argv)

dumpoptions.nctype = NC_UBYTE; /* default */

while ((c = getopt(argc, argv, "df:k:u:vt:T:")) != EOF) {
while ((c = getopt(argc, argv, "df:k:p:t:T:u:v")) != EOF) {
switch(c) {
case 'd':
dumpoptions.debug = 1;

@ -160,6 +159,9 @@ main(int argc, char** argv)
memcpy(dumpoptions.key,optarg,strlen(optarg));
dumpoptions.key[len] = '\0';
} break;
case 'p':
dumpoptions.profile = strdup(optarg);
break;
case 't':
dumpoptions.nctype = typefor(optarg);
break;

@ -203,19 +205,14 @@ main(int argc, char** argv)

dumpoptions.s3op = decodeop(argv[0]);

s3initialize();

memset(&s3sdk,0,sizeof(s3sdk));

if((stat = NCZ_s3urlprocess(dumpoptions.url, &s3sdk.s3))) goto done;
if(s3sdk.s3.rootkey != NULL && dumpoptions.key != NULL) {
size_t len = 0;
/* Make the root key be the concatenation of rootkey+dumpoptions.key */
len = strlen(s3sdk.s3.rootkey) + strlen(dumpoptions.key) + 1;
if((tmp = (char*)malloc(len+1))==NULL) {stat = NC_ENOMEM; goto done;}
tmp[0] = '\0';
strcat(tmp,s3sdk.s3.rootkey);
if(s3sdk.s3.rootkey[strlen(s3sdk.s3.rootkey)-1] != '/' && dumpoptions.key[0] != '/')
strcat(tmp,"/");
strcat(tmp,dumpoptions.key);
if((stat = nczm_concat(s3sdk.s3.rootkey,dumpoptions.key,&tmp))) goto done;
nullfree(s3sdk.s3.rootkey);
s3sdk.s3.rootkey = tmp; tmp = NULL;
} else if(dumpoptions.key != NULL) {

@ -225,8 +222,6 @@ main(int argc, char** argv)
if(s3sdk.s3.rootkey == NULL || strlen(s3sdk.s3.rootkey)==0)
s3sdk.s3.rootkey = strdup("/");

s3initialize();

switch (dumpoptions.s3op) {
default:
fprintf(stderr,"Default action: list\n");

@ -4,18 +4,21 @@ if test "x$SETX" != x; then set -x; fi

# Figure out which cloud repo to use
if test "x$NCZARR_S3_TEST_HOST" = x ; then
export NCZARR_S3_TEST_HOST=stratus.ucar.edu
# export NCZARR_S3_TEST_HOST=stratus.ucar.edu
export NCZARR_S3_TEST_HOST=s3.us-east-1.amazonaws.com
fi
if test "x$NCZARR_S3_TEST_BUCKET" = x ; then
export NCZARR_S3_TEST_BUCKET=unidata-netcdf-zarr-testing
# export NCZARR_S3_TEST_BUCKET=unidata-netcdf-zarr-testing
export NCZARR_S3_TEST_BUCKET=unidata-zarr-test-data
fi
export NCZARR_S3_TEST_URL="https://${NCZARR_S3_TEST_HOST}/${NCZARR_S3_TEST_BUCKET}"

ZMD="${execdir}/zmapio"

awsdelete() {
${execdir}/s3util -u "${NCZARR_S3_TEST_URL}/" -k "$1" clear
#aws s3api delete-object --endpoint-url=https://${NCZARR_S3_TEST_HOST} --bucket=${NCZARR_S3_TEST_BUCKET} --key="netcdf-c/$1"
${execdir}/s3util ${PROFILE} -u "${NCZARR_S3_TEST_URL}" -k "$1" clear
# aws s3api delete-object --endpoint-url=https://${NCZARR_S3_TEST_HOST} --bucket=${NCZARR_S3_TEST_BUCKET} --key="netcdf-c/$1"
X=
}

# Check settings

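The reworked awsdelete() above shows the intended command-line usage of s3util with the new -p (profile) option. For illustration only, a direct invocation might look like the sketch below; the profile name, bucket, and key are hypothetical test values borrowed from elsewhere in this diff, not something this commit runs as-is:

# hypothetical example: clear everything under one key using the "unidata" profile
${execdir}/s3util -p unidata -u "https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data" -k "/netcdf-c/tmp_rep" clear
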
@ -68,7 +68,7 @@ main(int argc, char** argv)
utoptions.output = tmp;

impl = kind2impl(utoptions.kind);
url = makeurl(utoptions.file,impl);
url = makeurl(utoptions.file,impl,&utoptions);

if((stat = runtests((const char**)utoptions.cmds,tests))) goto done;

@ -64,8 +64,8 @@ main(int argc, char** argv)
utoptions.output = tmp;

impl = kind2impl(utoptions.kind);
// if(impl == NCZM_S3) setkeyprefix(utoptions.file);
url = makeurl(utoptions.file,impl);
url = makeurl(utoptions.file,impl,&utoptions);

if((stat = runtests((const char**)utoptions.cmds,tests))) goto done;

@ -56,7 +56,7 @@ ut_init(int argc, char** argv, struct UTOptions * options)
if(options != NULL) {
options->dimdefs = nclistnew();
options->vardefs = nclistnew();
while ((c = getopt(argc, argv, "T:Dx:f:o:k:d:v:s:W:")) != EOF) {
while ((c = getopt(argc, argv, "T:Dx:f:o:p:k:d:v:s:W:")) != EOF) {
switch(c) {
case 'T':
nctracelevel(atoi(optarg));

@ -73,6 +73,9 @@ ut_init(int argc, char** argv, struct UTOptions * options)
case 'o':
options->output = strdup(optarg);
break;
case 'p':
options->profile = strdup(optarg);
break;
case 'k': /*implementation*/
options->kind = strdup(optarg);
break;

@ -188,7 +191,7 @@ nccheck(int stat, int line)
}

char*
makeurl(const char* file, NCZM_IMPL impl)
makeurl(const char* file, NCZM_IMPL impl, struct UTOptions* options)
{
char* url = NULL;
NCbytes* buf = ncbytesnew();

@ -214,10 +217,14 @@ makeurl(const char* file, NCZM_IMPL impl)
case NCZM_S3:
/* Assume that we have a complete url */
if(ncuriparse(file,&uri)) return NULL;
if(strcasecmp(uri->protocol,"s3")==0)
ncurisetprotocol(uri,"https");
if(strcasecmp(uri->protocol,"http")!=0 && strcasecmp(uri->protocol,"https")!=0)
return NULL;
if(options->profile) {
const char* profile = ncurifragmentlookup(uri,"aws.profile");
if(profile == NULL) {
ncurisetfragmentkey(uri,"aws.profile",options->profile);
/* rebuild the url */
file = (const char*)ncuribuild(uri,NULL,NULL,NCURIALL); /* BAD but simple */
}
}
ncbytescat(buf,file);
break;
default: abort();

@ -46,6 +46,7 @@ struct UTOptions {
char* file;
char* output;
char* kind;
char* profile;
NCZChunkRange ranges[NC_MAX_VAR_DIMS];
int nslices;
NCZSlice slices[NC_MAX_VAR_DIMS];

@ -68,7 +69,7 @@ extern void usage(int err);
extern int ut_init(int argc, char** argv, struct UTOptions* test);

extern void nccheck(int stat, int line);
extern char* makeurl(const char* file,NCZM_IMPL);
extern char* makeurl(const char* file, NCZM_IMPL, struct UTOptions*);
//extern int setup(int argc, char** argv);
extern struct Test* findtest(const char* cmd, struct Test* tests);
extern int runtests(const char** cmds, struct Test* tests);

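The NCZM_S3 branch of makeurl() above is the heart of this change: when a test was given -p <profile>, the profile name is injected into the URL fragment as aws.profile. A minimal standalone sketch of that idiom, using only the ncuri calls that appear in the hunk above (the helper name with_profile is ours, not part of the commit):

#include <stdlib.h>
#include "ncuri.h"

/* Sketch: return a copy of `file` whose fragment carries aws.profile=<profile>,
   leaving any existing aws.profile setting alone. Caller frees the result. */
static char*
with_profile(const char* file, const char* profile)
{
    NCURI* uri = NULL;
    char* newurl = NULL;
    if(ncuriparse(file,&uri)) return NULL;               /* not a parseable URL */
    if(profile != NULL && ncurifragmentlookup(uri,"aws.profile") == NULL)
        ncurisetfragmentkey(uri,"aws.profile",profile);  /* add the fragment key */
    newurl = ncuribuild(uri,NULL,NULL,NCURIALL);         /* rebuild the full URL */
    ncurifree(uri);
    return newurl;
}
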
@ -240,7 +240,7 @@ oc_curl_debug(OCstate* state)
int
ocrc_netrc_required(OCstate* state)
{
char* netrcfile = NC_rclookup(NETRCFILETAG,state->uri->uri);
char* netrcfile = NC_rclookup(NETRCFILETAG,state->uri->uri,NULL);
return (netrcfile != NULL || state->auth->curlflags.netrc != NULL ? 0 : 1);
}

@ -85,9 +85,6 @@ ocinternalinitialize(void)
/* Compute some xdr related flags */
xxdr_init();

/* Make sure that the rc file has been loaded */
(void)NC_rcload();

return OCTHROW(stat);
}

@ -489,7 +486,7 @@ ocget_rcproperties(OCstate* state)
OCerror ocerr = OC_NOERR;
char* option = NULL;
#ifdef HAVE_CURLOPT_BUFFERSIZE
option = NC_rclookup(OCBUFFERSIZE,state->uri->uri);
option = NC_rclookup(OCBUFFERSIZE,state->uri->uri,NULL);
if(option != NULL && strlen(option) != 0) {
long bufsize;
if(strcasecmp(option,"max")==0)

@ -500,7 +497,7 @@ ocget_rcproperties(OCstate* state)
}
#endif
#ifdef HAVE_CURLOPT_KEEPALIVE
option = NC_rclookup(OCKEEPALIVE,state->uri->uri);
option = NC_rclookup(OCKEEPALIVE,state->uri->uri,NULL);
if(option != NULL && strlen(option) != 0) {
/* The keepalive value is of the form 0 or n/m,
where n is the idle time and m is the interval time;

@ -231,8 +231,8 @@ size_t blosc_filter(unsigned flags, size_t cd_nelmts,
int doshuffle = 1; /* Shuffle default */
int compcode; /* Blosc compressor */
int code;
char* compname = "blosclz"; /* The compressor by default */
char* complist = NULL;
const char* compname = "blosclz"; /* The compressor by default */
const char* complist = NULL;

/* Filter params that are always set */
typesize = cd_values[2]; /* The datatype size */

@ -592,7 +592,7 @@ NCZ_blosc_hdf5_to_codec(size_t nparams, const unsigned* params, char** codecp)
{
int stat = NC_NOERR;
char json[1024];
char* compname = NULL;
const char* compname = NULL;

if(nparams == 0 || params == NULL)
{stat = NC_EINVAL; goto done;}

@ -25,7 +25,13 @@ FOREACH(CTEST ${UNIT_TESTS})
add_bin_test(unit_test ${CTEST})
ENDFOREACH()

# Aws Tests
build_bin_test(test_aws)
ADD_SH_TEST(unit_test run_aws)

# Performance tests
add_bin_test(unit_test tst_exhash timer_utils.c)
add_bin_test(unit_test tst_xcache timer_utils.c)

FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.sh)
FILE(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/ FILE_PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE)

@ -15,6 +15,7 @@ include $(top_srcdir)/lib_flags.am
LDADD = ${top_builddir}/liblib/libnetcdf.la

check_PROGRAMS =
TESTS =

check_PROGRAMS += tst_nclist test_ncuri test_pathcvt

@ -23,13 +24,17 @@ check_PROGRAMS += tst_exhash tst_xcache
tst_exhash_SOURCES = tst_exhash.c timer_utils.c timer_utils.h
tst_xcache_SOURCES = tst_xcache.c timer_utils.c timer_utils.h

TESTS += tst_nclist test_ncuri test_pathcvt tst_exhash tst_xcache

if USE_NETCDF4
check_PROGRAMS += tst_nc4internal
TESTS += tst_nc4internal
endif # USE_NETCDF4

TESTS = ${check_PROGRAMS}
check_PROGRAMS += test_aws
TESTS += run_aws.sh

EXTRA_DIST = CMakeLists.txt
EXTRA_DIST = CMakeLists.txt run_aws.sh

# If valgrind is present, add valgrind targets.
@VALGRIND_CHECK_RULES@

27
unit_test/run_aws.sh
Executable file

@ -0,0 +1,27 @@
#!/bin/sh

if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh

set -e

export NC_TEST_AWS_DIR=`pwd`

rm -fr ./.aws
mkdir .aws
cat >.aws/config <<EOF
[default]
aws_access_key_id=ACCESSKEYDEFAULTXXXX
aws_secret_access_key=DEFAULT/ef0ghijklmnopqr/defaultxxxxxxxxx
[ncar]
aws_access_key_id=ACCESSKEYNCARXXXXXXX
aws_secret_access_key=NCAR/ef0ghijklmnopqr/ncarxxxxxxxxxxxxxxx
[unidata]
aws_access_key_id=ACCESSKEYUNIDATAXXXX
aws_secret_access_key=UNIDATA/ef0ghijklmnopqr/unidataxxxxxxxxx
aws_region=us-west-1
EOF

${execdir}/test_aws

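This script is wired into both build systems earlier in this diff (ADD_SH_TEST in the CMake file, TESTS += run_aws.sh in the Makefile.am). A rough sketch of running just this test after a build; the exact test target names are assumptions, not taken from the commit:

# automake build tree
cd unit_test && make check TESTS=run_aws.sh

# CMake build tree (test name assumed to contain "run_aws")
ctest -R run_aws
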
175
unit_test/test_aws.c
Normal file

@ -0,0 +1,175 @@
/*********************************************************************
 *   Copyright 2018, UCAR/Unidata
 *   See netcdf/COPYRIGHT file for copying and redistribution conditions.
 *********************************************************************/

/**
Test the handling of aws profiles and regions.
*/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "netcdf.h"
#include "ncrc.h"
#include "ncpathmgr.h"

#define DEBUG

typedef struct ProfileTest {
    const char* profile;
    const char* access_key;
    const char* secret_key;
    const char* region;
} ProfileTest;

typedef struct URLTest {
    const char* url;
    const char* newurl;
    const char* profile;
    const char* region;
    const char* bucket;
} URLTest;

static ProfileTest PROFILETESTS[] = {
    {"default", "ACCESSKEYDEFAULTXXXX", "DEFAULT/ef0ghijklmnopqr/defaultxxxxxxxxx",""},
    {"ncar", "ACCESSKEYNCARXXXXXXX", "NCAR/ef0ghijklmnopqr/ncarxxxxxxxxxxxxxxx",""},
    {"unidata", "ACCESSKEYUNIDATAXXXX", "UNIDATA/ef0ghijklmnopqr/unidataxxxxxxxxx", "us-west-1"},
    {NULL, NULL,NULL,NULL}
};

static URLTest URLTESTS[] = {
    {"s3://simplebucket#mode=nczarr,s3&aws.region=us-west-1",
     "s3://s3.us-west-1.amazonaws.com/simplebucket#mode=nczarr,s3&aws.region=us-west-1","default","us-west-1","simplebucket"},
    {"s3://simplebucket#mode=nczarr,s3&aws.profile=unidata",
     "s3://s3.us-west-1.amazonaws.com/simplebucket#mode=nczarr,s3&aws.profile=unidata","unidata","us-west-1","simplebucket"},
    {"https://s3.eu-east-1.amazonaws.com/simplebucket#mode=nczarr,s3&aws.profile=none",
     "https://s3.eu-east-1.amazonaws.com/simplebucket#mode=nczarr,s3&aws.profile=none","none","eu-east-1","simplebucket"},
    {"https://s3.eu-west-1.amazonaws.com/bucket2#mode=nczarr,s3",
     "https://s3.eu-west-1.amazonaws.com/bucket2#mode=nczarr,s3","default","eu-west-1","bucket2"},
    {NULL, NULL,NULL,NULL,NULL}
};

static char* awstestdir0 = NULL;

void
failurltest(URLTest* test)
{
    fprintf(stderr,"***FAIL: url=%s\n",test->url);
#ifdef DEBUG
    abort();
#endif
    exit(1);
}

void
failprofiletest(ProfileTest* test)
{
    fprintf(stderr,"***FAIL: profile=%s\n",test->profile);
#ifdef DEBUG
    abort();
#endif
    exit(1);
}

static int
testprofiles(void)
{
    int stat = NC_NOERR;
    ProfileTest* test;
    int index;

    for(index=0,test=PROFILETESTS;test->profile;test++,index++) {
        const char* accesskey = NULL;
        const char* region = NULL;

        if((stat = NC_s3profilelookup(test->profile, "aws_access_key_id", &accesskey))) goto done;
        if((stat = NC_s3profilelookup(test->profile, "aws_region", &region))) goto done;
        if(region == NULL) region = "";
#ifdef DEBUG
        printf("profile=%s aws_access_key_id=%s region=%s\n",
               test->profile,
               (accesskey?accesskey:""),
               (region?region:""));
#endif
        if(accesskey == NULL || strcasecmp(accesskey,test->access_key)!=0) failprofiletest(test);
        if(region == NULL || strcasecmp(region,test->region)!=0) failprofiletest(test);
    }
done:
    return stat;
}

static int
testurls(void)
{
    int stat = NC_NOERR;
    URLTest* test;
    int index;
    NCURI* url = NULL;
    NCURI* url2 = NULL;
    const char* profile = NULL;
    char* region = NULL;
    char* bucket = NULL;
    char* newurl = NULL;

    for(index=0,test=URLTESTS;test->url;test++,index++) {
        ncuriparse(test->url,&url);
        if(url == NULL) {
            fprintf(stderr,"URI parse fail: %s\n",test->url);
            goto done;
        }
        if((stat = NC_getactives3profile(url, &profile))) {
            fprintf(stderr,"active profile fail: %s\n",test->url);
            goto done;
        }
        if((stat = NC_s3urlrebuild(url, &url2, &bucket, &region))) {
            fprintf(stderr,"url rebuild failed: %s\n",test->url);
            goto done;
        }
        newurl = ncuribuild(url2,NULL,NULL,NCURIALL);
#ifdef DEBUG
        printf("url=%s {url=%s bucket=%s region=%s profile=%s}\n",
               test->url,newurl,bucket,region,profile);
#endif
        if(strcasecmp(newurl,test->newurl)!=0) failurltest(test);
        if(strcasecmp(profile,test->profile)!=0) failurltest(test);
        if(strcasecmp(region,test->region)!=0) failurltest(test);
        if(strcasecmp(bucket,test->bucket)!=0) failurltest(test);
        ncurifree(url); url = NULL;
        ncurifree(url2); url2 = NULL;
        nullfree(newurl); newurl = NULL;
        nullfree(bucket); bucket = NULL;
        nullfree(region); region = NULL;
    }
done:
    return stat;
}


int
main(int argc, char** argv)
{
    int stat = NC_NOERR;

    awstestdir0 = getenv("NC_TEST_AWS_DIR");
    if(awstestdir0 == NULL) {
        fprintf(stderr,"NC_TEST_AWS_DIR environment variable is undefined\n");
        goto done;
    }

    /* Load RC and .aws/config */
    if((stat = nc_initialize())) goto done;

    printf("testprofiles:\n-------------\n");
    stat = testprofiles();

    printf("testurls:\n--------\n");
    stat = testurls();

    printf("***PASS test_aws\n");

done:
    if(stat) printf("*** FAIL: %s(%d)\n",nc_strerror(stat),stat);
    exit(stat?1:0);
}

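As a usage note (not part of the commit), the same lookup this test exercises is available to client code once nc_initialize() has loaded the rc files and ~/.aws/config. The helper below is a hypothetical sketch built only from the calls shown in test_aws.c above:

#include <stdio.h>
#include "netcdf.h"
#include "ncrc.h"

/* Sketch: print the access key id and region recorded for a named profile. */
static int
show_profile(const char* profile)
{
    int stat = NC_NOERR;
    const char* accesskey = NULL;
    const char* region = NULL;
    if((stat = NC_s3profilelookup(profile, "aws_access_key_id", &accesskey))) return stat;
    if((stat = NC_s3profilelookup(profile, "aws_region", &region))) return stat;
    printf("[%s] aws_access_key_id=%s aws_region=%s\n", profile,
           (accesskey?accesskey:"(unset)"), (region?region:"(unset)"));
    return stat;
}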