Merging in latest from upstream (HDFFV/hdf5:refs/heads/develop)

* commit 'f97e11e7635a0cd8728d4604ca5dceb3925ba44c':
  Update comment and check for strtoumax.
  Modify CMakeLists.txt file for renamed h5tools_test_utils files.
  Add HD to string functions. Switched strtoul to strtoumax in H5FDs3comms.c. Removed unused functions and variables in s3 and hdfs TestH5Pfapl*.java. Update Copyright headers.
  Squashed commit of the following:
Author: Allen Byrne, 2019-07-29 16:10:45 -05:00
Commit: 517cadac26
67 changed files with 19206 additions and 225 deletions

View File

@@ -573,6 +573,27 @@ endif ()
include (${HDF_RESOURCES_DIR}/HDFCompilerFlags.cmake)
set (CMAKE_MODULE_PATH ${HDF_RESOURCES_DIR} ${HDF_RESOURCES_EXT_DIR} ${CMAKE_MODULE_PATH})
#-----------------------------------------------------------------------------
# Option to Enable HDFS
#-----------------------------------------------------------------------------
option (HDF5_ENABLE_HDFS "Enable HDFS" OFF)
if (HDF5_ENABLE_HDFS)
find_package(JNI REQUIRED)
if (JNI_FOUND)
set (H5_HAVE_LIBJVM 1)
endif ()
find_package(HDFS REQUIRED)
if (HDFS_FOUND)
set (H5_HAVE_LIBHDFS 1)
set (H5_HAVE_HDFS_H 1)
if (NOT MSVC)
list (APPEND LINK_LIBS -pthread)
endif ()
else ()
message (FATAL_ERROR "Set to use libhdfs library, but could not find or use libhdfs. Please verify that the path to HADOOP_HOME is valid, and/or reconfigure without HDF5_ENABLE_HDFS")
endif ()
endif ()
#-----------------------------------------------------------------------------
# Option to Enable MPI Parallel
#-----------------------------------------------------------------------------

View File

@@ -636,6 +636,8 @@
./src/H5FDdrvr_module.h
./src/H5FDfamily.c
./src/H5FDfamily.h
./src/H5FDhdfs.c
./src/H5FDhdfs.h
./src/H5FDint.c
./src/H5FDlog.c
./src/H5FDlog.h
@@ -646,9 +648,13 @@
./src/H5FDmpio.h
./src/H5FDmulti.c
./src/H5FDmulti.h
./src/H5FDros3.c
./src/H5FDros3.h
./src/H5FDpkg.h
./src/H5FDprivate.h
./src/H5FDpublic.h
./src/H5FDs3comms.h
./src/H5FDs3comms.c
./src/H5FDsec2.c
./src/H5FDsec2.h
./src/H5FDspace.c
@@ -1069,6 +1075,7 @@
./test/h5fc_ext_none.h5
./test/h5test.c
./test/h5test.h
./test/hdfs.c
./test/hyperslab.c
./test/istore.c
./test/le_data.h5
@@ -1094,7 +1101,9 @@
./test/paged_nopersist.h5
./test/paged_persist.h5
./test/reserved.c
./test/ros3.c
./test/pool.c
./test/s3comms.c
./test/set_extent.c
# ====distribute this for now. See HDFFV-8236====
./test/space_overflow.c
@@ -1546,6 +1555,9 @@
./tools/lib/io_timer.c
./tools/lib/io_timer.h
./tools/libtest/Makefile.am
./tools/libtest/h5tools_utils.c
./tools/src/misc/Makefile.am
./tools/src/misc/h5clear.c
./tools/src/misc/h5debug.c
@@ -3015,6 +3027,8 @@
./java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java
./java/src/hdf/hdf5lib/structs/H5E_error2_t.java
./java/src/hdf/hdf5lib/structs/H5F_info2_t.java
./java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
./java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java
./java/src/hdf/hdf5lib/structs/H5G_info_t.java
./java/src/hdf/hdf5lib/structs/H5L_info_t.java
./java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java
@@ -3178,6 +3192,8 @@
./java/test/testfiles/JUnit-TestH5P.txt
./java/test/testfiles/JUnit-TestH5PData.txt
./java/test/testfiles/JUnit-TestH5Pfapl.txt
./java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt
./java/test/testfiles/JUnit-TestH5Pfapls3.txt
./java/test/testfiles/JUnit-TestH5Plist.txt
./java/test/testfiles/JUnit-TestH5Pvirtual.txt
./java/test/testfiles/JUnit-TestH5PL.txt
@@ -3216,6 +3232,8 @@
./java/test/TestH5P.java
./java/test/TestH5PData.java
./java/test/TestH5Pfapl.java
./java/test/TestH5Pfaplhdfs.java
./java/test/TestH5Pfapls3.java
./java/test/TestH5Plist.java
./java/test/TestH5Pvirtual.java
./java/test/TestH5PL.java
@@ -3249,6 +3267,7 @@
./config/cmake/ConfigureChecks.cmake
./config/cmake/CPack.Info.plist.in
./config/cmake/CTestCustom.cmake
./config/cmake/FindHDFS.cmake
./config/cmake/H5cxx_config.h.in
./config/cmake/H5pubconf.h.in
./config/cmake/hdf5-config.cmake.in
@@ -3358,6 +3377,8 @@
./testpar/CMakeVFDTests.cmake
./tools/CMakeLists.txt
./tools/lib/CMakeLists.txt
./tools/libtest/CMakeLists.txt
./tools/libtest/CMakeTests.cmake
./tools/src/CMakeLists.txt
./tools/test/CMakeLists.txt
./tools/src/h5copy/CMakeLists.txt
@@ -3478,6 +3499,7 @@
./testpar/Makefile.in
./tools/Makefile.in
./tools/lib/Makefile.in
./tools/libtest/Makefile.in
./tools/src/Makefile.in
./tools/src/h5copy/Makefile.in
./tools/src/h5diff/Makefile.in

View File

@@ -139,6 +139,8 @@ $Source = "";
"H5FD_t" => "x",
"H5FD_class_t" => "x",
"H5FD_stream_fapl_t" => "x",
"H5FD_ros3_fapl_t" => "x",
"H5FD_hdfs_fapl_t" => "x",
"H5FD_file_image_callbacks_t" => "x",
"H5G_iterate_t" => "x",
"H5G_info_t" => "x",

View File

@@ -154,6 +154,21 @@ if (NOT WINDOWS)
endif ()
endif ()
#-----------------------------------------------------------------------------
# Check if ROS3 driver can be built
#-----------------------------------------------------------------------------
option (HDF5_ENABLE_ROS3_VFD "Build the ROS3 Virtual File Driver" OFF)
if (HDF5_ENABLE_ROS3_VFD)
find_package(CURL REQUIRED)
find_package(OpenSSL REQUIRED)
if (${CURL_FOUND} AND ${OPENSSL_FOUND})
set (${HDF_PREFIX}_HAVE_ROS3_VFD 1)
list (APPEND LINK_LIBS ${CURL_LIBRARIES} ${OPENSSL_LIBRARIES})
else ()
message (STATUS "The Read-Only S3 VFD was requested but cannot be built.\nPlease check that openssl and cURL are available on your\nsystem, and/or re-configure without option HDF5_ENABLE_ROS3_VFD.")
endif ()
endif ()
#-----------------------------------------------------------------------------
# Check if C has __float128 extension
#-----------------------------------------------------------------------------

View File

@@ -0,0 +1,70 @@
# DerivedFrom: https://github.com/cloudera/Impala/blob/cdh5-trunk/cmake_modules/FindHDFS.cmake
# - Find HDFS (hdfs.h and libhdfs.so)
# This module defines
# Hadoop_VERSION, version string of Hadoop if found
# HDFS_INCLUDE_DIR, directory containing hdfs.h
# HDFS_LIBRARIES, location of libhdfs.so
# HDFS_FOUND, whether HDFS is found.
exec_program($ENV{HADOOP_HOME}/bin/hadoop ARGS version OUTPUT_VARIABLE Hadoop_VERSION
RETURN_VALUE Hadoop_RETURN)
# currently only looking in HADOOP_HOME
find_path(HDFS_INCLUDE_DIR hdfs.h PATHS
$ENV{HADOOP_HOME}/include/
# make sure we don't accidentally pick up a different version
NO_DEFAULT_PATH
)
if ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "8")
set(arch_hint "x64")
elseif ("$ENV{LIB}" MATCHES "(amd64|ia64)")
set(arch_hint "x64")
else ()
set(arch_hint "x86")
endif()
message(STATUS "Architecture: ${arch_hint}")
if ("${arch_hint}" STREQUAL "x64")
set(HDFS_LIB_PATHS $ENV{HADOOP_HOME}/lib/native)
else ()
set(HDFS_LIB_PATHS $ENV{HADOOP_HOME}/lib/native)
endif ()
message(STATUS "HDFS_LIB_PATHS: ${HDFS_LIB_PATHS}")
find_library(HDFS_LIB NAMES hdfs PATHS
${HDFS_LIB_PATHS}
# make sure we don't accidentally pick up a different version
NO_DEFAULT_PATH
)
if (HDFS_LIB)
set(HDFS_FOUND TRUE)
set(HDFS_LIBRARIES ${HDFS_LIB})
set(HDFS_STATIC_LIB ${HDFS_LIB_PATHS}/${CMAKE_STATIC_LIBRARY_PREFIX}hdfs${CMAKE_STATIC_LIBRARY_SUFFIX})
add_library(hdfs_static STATIC IMPORTED)
set_target_properties(hdfs_static PROPERTIES IMPORTED_LOCATION ${HDFS_STATIC_LIB})
else ()
set(HDFS_FOUND FALSE)
endif ()
if (HDFS_FOUND)
if (NOT HDFS_FIND_QUIETLY)
message(STATUS "${Hadoop_VERSION}")
message(STATUS "HDFS_INCLUDE_DIR: ${HDFS_INCLUDE_DIR}")
message(STATUS "HDFS_LIBRARIES: ${HDFS_LIBRARIES}")
message(STATUS "hdfs_static: ${HDFS_STATIC_LIB}")
endif ()
else ()
message(FATAL_ERROR "HDFS includes and libraries NOT found."
"(${HDFS_INCLUDE_DIR}, ${HDFS_LIB})")
endif ()
mark_as_advanced(
HDFS_LIBRARIES
HDFS_INCLUDE_DIR
)

View File

@@ -110,6 +110,9 @@
/* Define if the function stack tracing code is to be compiled in */
#cmakedefine H5_HAVE_CODESTACK @H5_HAVE_CODESTACK@
/* Define to 1 if you have the <curl/curl.h> header file. */
#cmakedefine H5_HAVE_CURL_H @H5_HAVE_CURL_H@
/* Define if Darwin or Mac OS X */
#cmakedefine H5_HAVE_DARWIN @H5_HAVE_DARWIN@
@@ -185,6 +188,9 @@
/* Define to 1 if you have the `gettimeofday' function. */
#cmakedefine H5_HAVE_GETTIMEOFDAY @H5_HAVE_GETTIMEOFDAY@
/* Define to 1 if you have the <hdfs.h> header file. */
#cmakedefine H5_HAVE_HDFS_H @H5_HAVE_HDFS_H@
/* Define if the compiler understands inline */
#cmakedefine H5_HAVE_INLINE @H5_HAVE_INLINE@
@@ -201,12 +207,24 @@
/* Define to 1 if you have the <io.h> header file. */
#cmakedefine H5_HAVE_IO_H @H5_HAVE_IO_H@
/* Define to 1 if you have the `crypto' library (-lcrypto). */
#cmakedefine H5_HAVE_LIBCRYPTO @H5_HAVE_LIBCRYPTO@
/* Define to 1 if you have the `curl' library (-lcurl). */
#cmakedefine H5_HAVE_LIBCURL @H5_HAVE_LIBCURL@
/* Define to 1 if you have the `dl' library (-ldl). */
#cmakedefine H5_HAVE_LIBDL @H5_HAVE_LIBDL@
/* Define to 1 if you have the `dmalloc' library (-ldmalloc). */
#cmakedefine H5_HAVE_LIBDMALLOC @H5_HAVE_LIBDMALLOC@
/* Proceed to build with libhdfs */
#cmakedefine H5_HAVE_LIBHDFS @H5_HAVE_LIBHDFS@
/* Define to 1 if you have the `jvm' library (-ljvm). */
#cmakedefine H5_HAVE_LIBJVM @H5_HAVE_LIBJVM@
/* Define to 1 if you have the `m' library (-lm). */
#cmakedefine H5_HAVE_LIBM @H5_HAVE_LIBM@
@@ -264,6 +282,15 @@
/* Define if MPI_Info_c2f and MPI_Info_f2c exists */
#cmakedefine H5_HAVE_MPI_MULTI_LANG_Info @H5_HAVE_MPI_MULTI_LANG_Info@
/* Define to 1 if you have the <openssl/evp.h> header file. */
#cmakedefine H5_HAVE_OPENSSL_EVP_H @H5_HAVE_OPENSSL_EVP_H@
/* Define to 1 if you have the <openssl/hmac.h> header file. */
#cmakedefine H5_HAVE_OPENSSL_HMAC_H @H5_HAVE_OPENSSL_HMAC_H@
/* Define to 1 if you have the <openssl/sha.h> header file. */
#cmakedefine H5_HAVE_OPENSSL_SHA_H @H5_HAVE_OPENSSL_SHA_H@
/* Define if we have parallel support */
#cmakedefine H5_HAVE_PARALLEL @H5_HAVE_PARALLEL@
@@ -282,6 +309,10 @@
/* Define to 1 if you have the `rand_r' function. */
#cmakedefine H5_HAVE_RAND_R @H5_HAVE_RAND_R@
/* Define whether the Read-Only S3 virtual file driver (VFD) should be
compiled */
#cmakedefine H5_HAVE_ROS3_VFD @H5_HAVE_ROS3_VFD@
/* Define to 1 if you have the `round' function. */
#cmakedefine H5_HAVE_ROUND @H5_HAVE_ROUND@

View File

@@ -76,6 +76,8 @@ Parallel Filtered Dataset Writes: @PARALLEL_FILTERED_WRITES@
I/O filters (external): @EXTERNAL_FILTERS@
MPE: @H5_HAVE_LIBLMPE@
Direct VFD: @H5_HAVE_DIRECT@
(Read-Only) S3 VFD: @H5_HAVE_ROS3_VFD@
(Read-Only) HDFS VFD: @H5_HAVE_LIBHDFS@
dmalloc: @H5_HAVE_LIBDMALLOC@
Packages w/ extra debug output: @INTERNAL_DEBUG_OUTPUT@
API Tracing: @HDF5_ENABLE_TRACE@

View File

@@ -2808,6 +2808,130 @@ fi
## Direct VFD files are not built if not required.
AM_CONDITIONAL([DIRECT_VFD_CONDITIONAL], [test "X$DIRECT_VFD" = "Xyes"])
## ----------------------------------------------------------------------
## Check if Read-Only S3 virtual file driver is enabled by --enable-ros3-vfd
##
AC_SUBST([ROS3_VFD])
## Default is no Read-Only S3 VFD
ROS3_VFD=no
AC_ARG_ENABLE([ros3-vfd],
[AS_HELP_STRING([--enable-ros3-vfd],
[Build the Read-Only S3 virtual file driver (VFD).
[default=no]])],
[ROS3_VFD=$enableval], [ROS3_VFD=no])
if test "X$ROS3_VFD" = "Xyes"; then
AC_CHECK_HEADERS([curl/curl.h],, [unset ROS3_VFD])
AC_CHECK_HEADERS([openssl/evp.h],, [unset ROS3_VFD])
AC_CHECK_HEADERS([openssl/hmac.h],, [unset ROS3_VFD])
AC_CHECK_HEADERS([openssl/sha.h],, [unset ROS3_VFD])
if test "X$ROS3_VFD" = "Xyes"; then
AC_CHECK_LIB([curl], [curl_global_init],, [unset ROS3_VFD])
AC_CHECK_LIB([crypto], [EVP_sha256],, [unset ROS3_VFD])
fi
AC_MSG_CHECKING([if the Read-Only S3 virtual file driver (VFD) is enabled])
if test "X$ROS3_VFD" = "Xyes"; then
AC_DEFINE([HAVE_ROS3_VFD], [1],
[Define whether the Read-Only S3 virtual file driver (VFD) should be compiled])
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no])
ROS3_VFD=no
AC_MSG_ERROR([The Read-Only S3 VFD was requested but cannot be built.
Please check that openssl and cURL are available on your
system, and/or re-configure without option
--enable-ros3-vfd.])
fi
else
AC_MSG_CHECKING([if the Read-Only S3 virtual file driver (VFD) is enabled])
AC_MSG_RESULT([no])
ROS3_VFD=no
fi
## ----------------------------------------------------------------------
## Is libhdfs (Hadoop Distributed File System) present?
## It might be specified with the `--with-libhdfs' command-line switch.
## If found, enables the HDFS VFD.
##
AC_SUBST([HAVE_LIBHDFS])
AC_ARG_WITH([libhdfs],
[AS_HELP_STRING([--with-libhdfs=DIR],
[Provide libhdfs library to enable HDFS virtual file driver (VFD) [default=no]])],,
[withval=no])
case $withval in
no)
HAVE_LIBHDFS="no"
AC_MSG_CHECKING([for libhdfs])
AC_MSG_RESULT([suppressed])
;;
*)
HAVE_LIBHDFS="yes"
case "$withval" in
*,*)
libhdfs_inc="`echo $withval |cut -f1 -d,`"
libhdfs_lib="`echo $withval |cut -f2 -d, -s`"
;;
yes)
libhdfs_inc="$HADOOP_HOME/include"
libhdfs_lib="$HADOOP_HOME/lib"
;;
*)
if test -n "$withval"; then
libhdfs_inc="$withval/include"
libhdfs_lib="$withval/lib"
fi
;;
esac
if test -n "$libhdfs_inc"; then
CPPFLAGS="$CPPFLAGS -I$libhdfs_inc"
AM_CPPFLAGS="$AM_CPPFLAGS -I$libhdfs_inc"
fi
AC_CHECK_HEADERS([hdfs.h],,
[unset HAVE_LIBHDFS])
if test "x$HAVE_LIBHDFS" = "xyes"; then
dnl Check for '-ljvm' needed by libhdfs
JNI_LDFLAGS=""
if test -n "$JAVA_HOME"
then
JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH -L$JAVA_HOME/jre/lib/$OS_ARCH/server"
fi
ldflags_bak=$LDFLAGS
LDFLAGS="$LDFLAGS $JNI_LDFLAGS"
AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs])
LDFLAGS=$ldflags_bak
AC_SUBST([JNI_LDFLAGS])
if test -n "$libhdfs_lib"; then
## Hadoop distribution hides libraries down one level in 'lib/native'
libhdfs_lib="$libhdfs_lib/native"
LDFLAGS="$LDFLAGS -L$libhdfs_lib $JNI_LDFLAGS"
AM_LDFLAGS="$AM_LDFLAGS -L$libhdfs_lib $JNI_LDFLAGS"
fi
AC_CHECK_LIB([hdfs], [hdfsConnect],,
[unset HAVE_LIBHDFS])
fi
if test -z "$HAVE_LIBHDFS"; then
AC_MSG_ERROR([Set to use libhdfs library, but could not find or use
libhdfs. Please verify that the path to HADOOP_HOME is
valid, and/or reconfigure without --with-libhdfs.])
fi
;;
esac
if test "x$HAVE_LIBHDFS" = "xyes"; then
AC_DEFINE([HAVE_LIBHDFS], [1],
[Proceed to build with libhdfs])
fi
## Checkpoint the cache
AC_CACHE_SAVE
## ----------------------------------------------------------------------
## Enable custom plugin default path for library. It requires SHARED support.
##
@@ -3537,6 +3661,7 @@ AC_CONFIG_FILES([src/libhdf5.settings
testpar/testpflush.sh
tools/Makefile
tools/lib/Makefile
tools/libtest/Makefile
tools/src/Makefile
tools/src/h5dump/Makefile
tools/src/h5import/Makefile

View File

@@ -98,6 +98,8 @@ hdf5_java_JAVA = \
${pkgpath}/structs/H5A_info_t.java \
${pkgpath}/structs/H5E_error2_t.java \
${pkgpath}/structs/H5F_info2_t.java \
${pkgpath}/structs/H5FD_hdfs_fapl_t.java \
${pkgpath}/structs/H5FD_ros3_fapl_t.java \
${pkgpath}/structs/H5G_info_t.java \
${pkgpath}/structs/H5L_info_t.java \
${pkgpath}/structs/H5O_info_t.java \

View File

@@ -73,6 +73,8 @@ set (HDF5_JAVA_HDF_HDF5_STRUCTS_SOURCES
structs/H5AC_cache_config_t.java
structs/H5E_error2_t.java
structs/H5F_info2_t.java
structs/H5FD_ros3_fapl_t.java
structs/H5FD_hdfs_fapl_t.java
structs/H5G_info_t.java
structs/H5L_info_t.java
structs/H5O_hdr_info_t.java

View File

@@ -50,6 +50,8 @@ import hdf.hdf5lib.structs.H5AC_cache_config_t;
import hdf.hdf5lib.structs.H5A_info_t;
import hdf.hdf5lib.structs.H5E_error2_t;
import hdf.hdf5lib.structs.H5F_info2_t;
import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t;
import hdf.hdf5lib.structs.H5FD_ros3_fapl_t;
import hdf.hdf5lib.structs.H5G_info_t;
import hdf.hdf5lib.structs.H5L_info_t;
import hdf.hdf5lib.structs.H5O_info_t;
@@ -7796,6 +7798,10 @@ public class H5 implements java.io.Serializable {
public synchronized static native int H5Pset_fapl_family(long fapl_id, long memb_size, long memb_fapl_id)
throws HDF5LibraryException, NullPointerException;
public synchronized static native int H5Pset_fapl_hdfs(long fapl_id, H5FD_hdfs_fapl_t fapl_conf) throws HDF5LibraryException, NullPointerException;
public synchronized static native H5FD_hdfs_fapl_t H5Pget_fapl_hdfs(long fapl_id) throws HDF5LibraryException, NullPointerException;
/**
* H5Pget_fapl_multi Sets up use of the multi I/O driver.
*
@@ -7880,6 +7886,10 @@
public synchronized static native int H5Pset_fapl_windows(long fapl_id) throws HDF5LibraryException, NullPointerException;
public synchronized static native int H5Pset_fapl_ros3(long fapl_id, H5FD_ros3_fapl_t fapl_conf) throws HDF5LibraryException, NullPointerException;
public synchronized static native H5FD_ros3_fapl_t H5Pget_fapl_ros3(long fapl_id) throws HDF5LibraryException, NullPointerException;
// /////// unimplemented ////////
// Generic property list routines //

View File

@@ -246,6 +246,8 @@ public class HDF5Constants {
public static final long H5FD_SEC2 = H5FD_SEC2();
public static final long H5FD_STDIO = H5FD_STDIO();
public static final long H5FD_WINDOWS = H5FD_WINDOWS();
public static final long H5FD_ROS3 = H5FD_ROS3();
public static final long H5FD_HDFS = H5FD_HDFS();
public static final int H5FD_LOG_LOC_READ = H5FD_LOG_LOC_READ();
public static final int H5FD_LOG_LOC_WRITE = H5FD_LOG_LOC_WRITE();
public static final int H5FD_LOG_LOC_SEEK = H5FD_LOG_LOC_SEEK();
@@ -1111,6 +1113,10 @@
private static native final long H5FD_WINDOWS();
private static native final long H5FD_ROS3();
private static native final long H5FD_HDFS();
private static native final int H5FD_LOG_LOC_READ();
private static native final int H5FD_LOG_LOC_WRITE();

View File

@@ -0,0 +1,102 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Read-Only HDFS Virtual File Driver (VFD) *
* Copyright (c) 2018, The HDF Group. *
* *
* All rights reserved. *
* *
* NOTICE: *
* All information contained herein is, and remains, the property of The HDF *
* Group. The intellectual and technical concepts contained herein are *
* proprietary to The HDF Group. Dissemination of this information or *
* reproduction of this material is strictly forbidden unless prior written *
* permission is obtained from The HDF Group. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
import java.io.Serializable;
/*
* Java representation of the HDFS VFD file access property list (fapl)
* structure.
*
* Used for the access of files hosted on the Hadoop Distributed File System.
*/
@SuppressWarnings("serial") // mute default serialUID warnings until someone knowledgeable comes along or something breaks horribly
public class H5FD_hdfs_fapl_t implements Serializable {
private long version;
private String namenode_name;
private String user_name;
private String kerberos_ticket_cache;
private int namenode_port;
private int stream_buffer_size;
/**
* Create a fapl_t structure with the specified components.
*/
public H5FD_hdfs_fapl_t(
String namenode_name,
int namenode_port,
String user_name,
String kerberos_ticket_cache,
int stream_buffer_size)
{
this.version = 1;
this.namenode_name = namenode_name;
this.namenode_port = namenode_port;
this.user_name = user_name;
this.kerberos_ticket_cache = kerberos_ticket_cache;
this.stream_buffer_size = stream_buffer_size;
}
@Override
public boolean equals(Object o) {
if (o == null)
return false;
if (!(o instanceof H5FD_hdfs_fapl_t))
return false;
H5FD_hdfs_fapl_t other = (H5FD_hdfs_fapl_t)o;
if (this.version != other.version)
return false;
if (!this.namenode_name.equals(other.namenode_name))
return false;
if (this.namenode_port != other.namenode_port)
return false;
if (!this.user_name.equals(other.user_name))
return false;
if (!this.kerberos_ticket_cache.equals(other.kerberos_ticket_cache))
return false;
if (this.stream_buffer_size != other.stream_buffer_size)
return false;
return true;
}
@Override
public int hashCode() {
/* this is a _very bad_ hash algorithm for purposes of hashing! */
/* implemented to satisfy the "contract" regarding equality */
int k = (int)this.version;
k += this.namenode_name.length();
k += this.user_name.length();
k += this.kerberos_ticket_cache.length();
k += namenode_port;
k += stream_buffer_size;
return k;
}
@Override
public String toString() {
return "H5FD_hdfs_fapl_t (Version: " + this.version + ") {" +
"\n namenode_name: '" + this.namenode_name +
"'\n namenode_port: " + this.namenode_port +
"\n user_name: '" + this.user_name +
"'\n kerberos_ticket_cache: '" + this.kerberos_ticket_cache +
"'\n stream_buffer_size: " + this.stream_buffer_size +
"\n}\n";
}
}

View File

@@ -0,0 +1,121 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Read-Only S3 Virtual File Driver (VFD) *
* Copyright (c) 2017-2018, The HDF Group. *
* *
* All rights reserved. *
* *
* NOTICE: *
* All information contained herein is, and remains, the property of The HDF *
* Group. The intellectual and technical concepts contained herein are *
* proprietary to The HDF Group. Dissemination of this information or *
* reproduction of this material is strictly forbidden unless prior written *
* permission is obtained from The HDF Group. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
import java.io.Serializable;
/*
* Java representation of the ROS3 VFD file access property list (fapl)
* structure.
*
* Used for the access of files hosted remotely on S3 by Amazon.
*
* For simplicity, implemented assuming that all ROS3 fapls have components:
* - version
* - aws_region
* - secret_id
* - secret_key
*
* Future implementations may be created to enable different fapl "shapes"
* depending on provided version.
*
* proposed:
*
* H5FD_ros3_fapl_t (super class, has only version field)
* H5FD_ros3_fapl_v1_t (extends super with Version 1 components)
* H5FD_ros3_fapl_v2_t (extends super with Version 2 components)
* and so on, for each version
*
* "super" is passed around, and is version-checked and re-cast as
* appropriate
*/
@SuppressWarnings("serial") // mute default serialUID warnings until someone knowledgeable comes along or something breaks horribly
public class H5FD_ros3_fapl_t implements Serializable {
private long version;
private String aws_region;
private String secret_id;
private String secret_key;
/**
* Create a "default" fapl_t structure, for anonymous access.
*/
public H5FD_ros3_fapl_t () {
/* H5FD_ros3_fapl_t("", "", ""); */ /* defer */
this.version = 1;
this.aws_region = "";
this.secret_id = "";
this.secret_key = "";
}
/**
* Create a fapl_t structure with the specified components.
* If all are the empty string, access is anonymous (non-authenticating).
* Region and ID must both be supplied for authentication.
*
* @param region "aws region" for authenticating request
* @param id "secret id" or "access id" for authenticating request
* @param key "secret key" or "access key" for authenticating request
*/
public H5FD_ros3_fapl_t (String region, String id, String key) {
this.version = 1; /* must equal H5FD__CURR_ROS3_FAPL_T_VERSION */
/* as found in H5FDros3.h */
this.aws_region = region;
this.secret_id = id;
this.secret_key = key;
}
@Override
public boolean equals(Object o) {
if (o == null)
return false;
if (!(o instanceof H5FD_ros3_fapl_t))
return false;
H5FD_ros3_fapl_t other = (H5FD_ros3_fapl_t)o;
if (this.version != other.version)
return false;
if (!this.aws_region.equals(other.aws_region))
return false;
if (!this.secret_key.equals(other.secret_key))
return false;
if (!this.secret_id.equals(other.secret_id))
return false;
return true;
}
@Override
public int hashCode() {
/* this is a _very bad_ hash algorithm for purposes of hashing! */
/* implemented to satisfy the "contract" regarding equality */
int k = (int)this.version;
k += this.aws_region.length();
k += this.secret_id.length();
k += this.secret_key.length();
return k;
}
@Override
public String toString() {
return "H5FD_ros3_fapl_t (Version:" + this.version + ") {" +
"\n aws_region : " + this.aws_region +
"\n secret_id : " + this.secret_id +
"\n secret_key : " + this.secret_key +
"\n}\n";
}
}

View File

@@ -453,6 +453,8 @@ Java_hdf_hdf5lib_HDF5Constants_H5FD_1DIRECT(JNIEnv *env, jclass cls) {
JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1FAMILY(JNIEnv *env, jclass cls) { return H5FD_FAMILY; }
JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1HDFS(JNIEnv *env, jclass cls) { return H5FD_HDFS; }
JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG(JNIEnv *env, jclass cls) { return H5FD_LOG; }
JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MPIO(JNIEnv *env, jclass cls) { return H5FD_MPIO; }
@@ -461,6 +463,8 @@ Java_hdf_hdf5lib_HDF5Constants_H5FD_1MULTI(JNIEnv *env, jclass cls) { return H5F
JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1SEC2(JNIEnv *env, jclass cls) { return H5FD_SEC2; }
JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1ROS3(JNIEnv *env, jclass cls) { return H5FD_ROS3; }
JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1STDIO(JNIEnv *env, jclass cls) { return H5FD_STDIO; }
JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1WINDOWS(JNIEnv *env, jclass cls) {

View File

@@ -51,6 +51,20 @@ if (NOT HDF5_ENABLE_DEBUG_APIS)
)
endif ()
if (HDF5_ENABLE_ROS3_VFD)
set (HDF5_JAVA_TEST_SOURCES
${HDF5_JAVA_TEST_SOURCES}
TestH5Pfapls3
)
endif ()
if (HDF5_ENABLE_HDFS)
set (HDF5_JAVA_TEST_SOURCES
${HDF5_JAVA_TEST_SOURCES}
TestH5Pfaplhdfs
)
endif ()
set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_LIB_DIR}/junit.jar;${HDF5_JAVA_LIB_DIR}/hamcrest-core.jar;${HDF5_JAVA_JARS};${HDF5_JAVA_LOGGING_JAR};${HDF5_JAVA_LOGGING_SIMPLE_JAR}")
foreach (test_file ${HDF5_JAVA_TEST_SOURCES})

View File

@@ -61,6 +61,8 @@ noinst_JAVA = \
TestH5P.java \
TestH5PData.java \
TestH5Pfapl.java \
TestH5Pfaplhdfs.java \
TestH5Pfapls3.java \
TestH5Pvirtual.java \
TestH5Plist.java \
TestH5A.java \

View File

@@ -27,6 +27,7 @@ import org.junit.runners.Suite;
TestH5Lparams.class, TestH5Lbasic.class, TestH5Lcreate.class,
TestH5R.class,
TestH5P.class, TestH5PData.class, TestH5Pfapl.class, TestH5Pvirtual.class, TestH5Plist.class,
TestH5Pfapls3.class, TestH5Pfaplhdfs.class,
TestH5A.class,
TestH5Oparams.class, TestH5Obasic.class, TestH5Ocopy.class, TestH5Ocreate.class,
TestH5PL.class, TestH5Z.class

View File

@@ -15,6 +15,7 @@ package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -28,6 +29,8 @@ import hdf.hdf5lib.exceptions.HDF5Exception;
import hdf.hdf5lib.exceptions.HDF5LibraryException;
import hdf.hdf5lib.exceptions.HDF5PropertyListInterfaceException;
import hdf.hdf5lib.structs.H5AC_cache_config_t;
import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t;
import hdf.hdf5lib.structs.H5FD_ros3_fapl_t;
import org.junit.After;
import org.junit.Before;
@@ -1398,4 +1401,5 @@ public class TestH5Pfapl {
fail("H5P_evict_on_close: " + err);
}
}
}

View File

@@ -0,0 +1,181 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* Copyright by the Board of Trustees of the University of Illinois. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import hdf.hdf5lib.H5;
import hdf.hdf5lib.HDF5Constants;
import hdf.hdf5lib.exceptions.HDF5Exception;
import hdf.hdf5lib.exceptions.HDF5LibraryException;
import hdf.hdf5lib.exceptions.HDF5PropertyListInterfaceException;
import hdf.hdf5lib.structs.H5AC_cache_config_t;
import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t;
import hdf.hdf5lib.structs.H5FD_ros3_fapl_t;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
public class TestH5Pfaplhdfs {
@Rule public TestName testname = new TestName();
long H5fid = -1;
long H5dsid = -1;
long H5did = -1;
long H5Fdsid = -1;
long H5Fdid = -1;
long fapl_id = -1;
long plapl_id = -1;
long dapl_id = -1;
long plist_id = -1;
long btplist_id = -1;
@Before
public void createFileAccess()
throws NullPointerException, HDF5Exception {
assertTrue("H5 open ids is 0",H5.getOpenIDCount()==0);
System.out.print(testname.getMethodName());
try {
fapl_id = H5.H5Pcreate(HDF5Constants.H5P_FILE_ACCESS);
}
catch (Throwable err) {
err.printStackTrace();
fail("TestH5Pfapl.createFileAccess: " + err);
}
assertTrue(fapl_id > 0);
try {
plapl_id = H5.H5Pcreate(HDF5Constants.H5P_LINK_ACCESS);
}
catch (Throwable err) {
err.printStackTrace();
fail("TestH5Pfapl.createFileAccess: " + err);
}
assertTrue(plapl_id > 0);
try {
plist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER);
btplist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER);
dapl_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_ACCESS);
}
catch (Throwable err) {
err.printStackTrace();
fail("TestH5Pfapl.createFileAccess: " + err);
}
assertTrue(plist_id > 0);
assertTrue(btplist_id > 0);
assertTrue(dapl_id > 0);
}
@After
public void deleteFileAccess() throws HDF5LibraryException {
if (fapl_id > 0)
try {H5.H5Pclose(fapl_id);} catch (Exception ex) {}
if (plapl_id > 0)
try {H5.H5Pclose(plapl_id);} catch (Exception ex) {}
if (dapl_id > 0)
try {H5.H5Pclose(dapl_id);} catch (Exception ex) {}
if (plist_id > 0)
try {H5.H5Pclose(plist_id);} catch (Exception ex) {}
if (btplist_id > 0)
try {H5.H5Pclose(btplist_id);} catch (Exception ex) {}
if (H5Fdsid > 0)
try {H5.H5Sclose(H5Fdsid);} catch (Exception ex) {}
if (H5Fdid > 0)
try {H5.H5Dclose(H5Fdid);} catch (Exception ex) {}
if (H5dsid > 0)
try {H5.H5Sclose(H5dsid);} catch (Exception ex) {}
if (H5did > 0)
try {H5.H5Dclose(H5did);} catch (Exception ex) {}
if (H5fid > 0)
try {H5.H5Fclose(H5fid);} catch (Exception ex) {}
System.out.println();
}
@Test
public void testHDFS_fapl()
throws Exception
{
if (HDF5Constants.H5FD_HDFS < 0)
throw new HDF5LibraryException("skip");
String nodename = "blues";
int nodeport = 12345;
String username = "sparticus";
String kerbcache = "/dev/null";
int streamsize = 1024;
final H5FD_hdfs_fapl_t config = new H5FD_hdfs_fapl_t(
nodename,
nodeport,
username,
kerbcache,
streamsize
);
assertTrue("setting fapl should succeed",
-1 < H5.H5Pset_fapl_hdfs(fapl_id, config));
assertEquals("driver types should match",
HDF5Constants.H5FD_HDFS,
H5.H5Pget_driver(fapl_id));
H5FD_hdfs_fapl_t copy = H5.H5Pget_fapl_hdfs(fapl_id);
assertEquals("fapl contents should match",
new H5FD_hdfs_fapl_t(
nodename,
nodeport,
username,
kerbcache,
streamsize),
copy);
}
@Test(expected = HDF5LibraryException.class)
public void testH5Pget_fapl_hdfs_invalid_fapl_id()
throws Exception
{
if (HDF5Constants.H5FD_HDFS < 0)
throw new HDF5LibraryException("skip");
H5FD_hdfs_fapl_t fails = H5.H5Pget_fapl_hdfs(-1);
}
@Test(expected = HDF5LibraryException.class)
public void testH5Pget_fapl_hdfs_fapl_id_of_wrong_driver_type()
throws Exception
{
if (HDF5Constants.H5FD_HDFS < 0)
throw new HDF5LibraryException("skip");
if (HDF5Constants.H5FD_SEC2 < 0 )
throw new HDF5LibraryException("skip");
/* TODO: for now, test against a sec2 fapl only */
H5.H5Pset_fapl_sec2(fapl_id);
assertEquals("fapl_id was not set properly",
HDF5Constants.H5FD_SEC2,
H5.H5Pget_driver(fapl_id));
H5FD_hdfs_fapl_t fails = H5.H5Pget_fapl_hdfs(fapl_id);
}
}

View File

@@ -0,0 +1,194 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* Copyright by the Board of Trustees of the University of Illinois. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import hdf.hdf5lib.H5;
import hdf.hdf5lib.HDF5Constants;
import hdf.hdf5lib.exceptions.HDF5Exception;
import hdf.hdf5lib.exceptions.HDF5LibraryException;
import hdf.hdf5lib.exceptions.HDF5PropertyListInterfaceException;
import hdf.hdf5lib.structs.H5AC_cache_config_t;
import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t;
import hdf.hdf5lib.structs.H5FD_ros3_fapl_t;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
public class TestH5Pfapls3 {
@Rule public TestName testname = new TestName();
long H5fid = -1;
long H5dsid = -1;
long H5did = -1;
long H5Fdsid = -1;
long H5Fdid = -1;
long fapl_id = -1;
long plapl_id = -1;
long dapl_id = -1;
long plist_id = -1;
long btplist_id = -1;
@Before
public void createFileAccess()
throws NullPointerException, HDF5Exception {
assertTrue("H5 open ids is 0",H5.getOpenIDCount()==0);
System.out.print(testname.getMethodName());
try {
fapl_id = H5.H5Pcreate(HDF5Constants.H5P_FILE_ACCESS);
}
catch (Throwable err) {
err.printStackTrace();
fail("TestH5Pfapl.createFileAccess: " + err);
}
assertTrue(fapl_id > 0);
try {
plapl_id = H5.H5Pcreate(HDF5Constants.H5P_LINK_ACCESS);
}
catch (Throwable err) {
err.printStackTrace();
fail("TestH5Pfapl.createFileAccess: " + err);
}
assertTrue(plapl_id > 0);
try {
plist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER);
btplist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER);
dapl_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_ACCESS);
}
catch (Throwable err) {
err.printStackTrace();
fail("TestH5Pfapl.createFileAccess: " + err);
}
assertTrue(plist_id > 0);
assertTrue(btplist_id > 0);
assertTrue(dapl_id > 0);
}
@After
public void deleteFileAccess() throws HDF5LibraryException {
if (fapl_id > 0)
try {H5.H5Pclose(fapl_id);} catch (Exception ex) {}
if (plapl_id > 0)
try {H5.H5Pclose(plapl_id);} catch (Exception ex) {}
if (dapl_id > 0)
try {H5.H5Pclose(dapl_id);} catch (Exception ex) {}
if (plist_id > 0)
try {H5.H5Pclose(plist_id);} catch (Exception ex) {}
if (btplist_id > 0)
try {H5.H5Pclose(btplist_id);} catch (Exception ex) {}
if (H5Fdsid > 0)
try {H5.H5Sclose(H5Fdsid);} catch (Exception ex) {}
if (H5Fdid > 0)
try {H5.H5Dclose(H5Fdid);} catch (Exception ex) {}
if (H5dsid > 0)
try {H5.H5Sclose(H5dsid);} catch (Exception ex) {}
if (H5did > 0)
try {H5.H5Dclose(H5did);} catch (Exception ex) {}
if (H5fid > 0)
try {H5.H5Fclose(H5fid);} catch (Exception ex) {}
System.out.println();
}
@Test
public void testH5Pset_fapl_ros3()
throws Exception
{
if (HDF5Constants.H5FD_ROS3 < 0)
return;
final H5FD_ros3_fapl_t config = new H5FD_ros3_fapl_t();
assertEquals("Default fapl has unexpected contents",
new H5FD_ros3_fapl_t("", "", ""),
config);
H5.H5Pset_fapl_ros3(fapl_id, config);
assertEquals("driver types don't match",
HDF5Constants.H5FD_ROS3,
H5.H5Pget_driver(fapl_id));
/* get_fapl_ros3 can throw exception in error cases */
H5FD_ros3_fapl_t copy = H5.H5Pget_fapl_ros3(fapl_id);
assertEquals("contents of fapl set and get don't match",
new H5FD_ros3_fapl_t("", "", ""),
copy);
}
@Test(expected = HDF5LibraryException.class)
public void testH5Pget_fapl_ros3_invalid_fapl_id()
throws Exception
{
if (HDF5Constants.H5FD_ROS3 < 0)
throw new HDF5LibraryException("skip");
H5FD_ros3_fapl_t fails = H5.H5Pget_fapl_ros3(-1);
}
@Test(expected = HDF5LibraryException.class)
public void testH5Pget_fapl_ros3_fapl_id_of_wrong_driver_type()
throws Exception
{
if (HDF5Constants.H5FD_ROS3 < 0)
throw new HDF5LibraryException("skip");
if (HDF5Constants.H5FD_SEC2 < 0 )
throw new HDF5LibraryException("skip");
/* TODO: for now, test against a sec2 fapl only */
H5.H5Pset_fapl_sec2(fapl_id);
assertEquals("fapl_id was not set properly",
HDF5Constants.H5FD_SEC2,
H5.H5Pget_driver(fapl_id));
H5FD_ros3_fapl_t fails = H5.H5Pget_fapl_ros3(fapl_id);
}
@Test
public void testH5Pset_fapl_ros3_specified()
throws Exception
{
if (HDF5Constants.H5FD_ROS3 < 0)
return;
String region = "us-east-1";
String acc_id = "my_access_id";
String acc_key = "my_access_key";
final H5FD_ros3_fapl_t config = new H5FD_ros3_fapl_t(
region,
acc_id,
acc_key);
H5.H5Pset_fapl_ros3(fapl_id, config);
assertEquals("driver types don't match",
HDF5Constants.H5FD_ROS3,
H5.H5Pget_driver(fapl_id));
H5FD_ros3_fapl_t copy = H5.H5Pget_fapl_ros3(fapl_id);
assertEquals("contents of fapl set and get don't match",
new H5FD_ros3_fapl_t(region, acc_id, acc_key),
copy);
}
}

View File

@@ -18,6 +18,8 @@ srcdir=@srcdir@
USE_FILTER_SZIP="@USE_FILTER_SZIP@"
USE_FILTER_DEFLATE="@USE_FILTER_DEFLATE@"
USE_ROS3_VFD="@HAVE_ROS3_VFD@"
USE_HDFS_VFD="@HAVE_LIBHDFS@"
TESTNAME=JUnitInterface
EXIT_SUCCESS=0
@@ -93,6 +95,8 @@ $HDFTEST_HOME/testfiles/JUnit-TestH5R.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5P.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5PData.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5Pfapl.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5Pfapls3.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5Pfaplhdfs.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5Pvirtual.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5Plist.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5A.txt
@@ -1096,6 +1100,50 @@ if test $USE_FILTER_SZIP = "yes"; then
test yes = "$verbose" && $DIFF JUnit-TestH5Giterate.txt JUnit-TestH5Giterate.out |sed 's/^/ /'
fi
fi
if test "$USE_ROS3_VFD" = "yes"; then
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfapls3"
TESTING JUnit-TestH5Pfapls3
($RUNSERIAL $JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfapls3 > JUnit-TestH5Pfapls3.ext)
# Extract file name, line number, version and thread IDs because they may be different
sed -e 's/thread [0-9]*/thread (IDs)/' -e 's/: .*\.c /: (file name) /' \
-e 's/line [0-9]*/line (number)/' \
-e 's/Time: [0-9]*\.[0-9]*/Time: XXXX/' \
-e 's/v[1-9]*\.[0-9]*\./version (number)\./' \
-e 's/[1-9]*\.[0-9]*\.[0-9]*[^)]*/version (number)/' \
JUnit-TestH5Pfapls3.ext > JUnit-TestH5Pfapls3.out
if diff JUnit-TestH5Pfapls3.out JUnit-TestH5Pfapls3.txt > /dev/null; then
echo " PASSED JUnit-TestH5Pfapls3"
else
echo "**FAILED** JUnit-TestH5Pfapls3"
echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
test yes = "$verbose" && $DIFF JUnit-TestH5Pfapls3.txt JUnit-TestH5Pfapls3.out |sed 's/^/ /'
fi
fi
if test "$USE_HDFS_VFD" = "yes"; then
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfaplhdfs"
TESTING JUnit-TestH5Pfaplhdfs
($RUNSERIAL $JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfaplhdfs > JUnit-TestH5Pfaplhdfs.ext)
# Extract file name, line number, version and thread IDs because they may be different
sed -e 's/thread [0-9]*/thread (IDs)/' -e 's/: .*\.c /: (file name) /' \
-e 's/line [0-9]*/line (number)/' \
-e 's/Time: [0-9]*\.[0-9]*/Time: XXXX/' \
-e 's/v[1-9]*\.[0-9]*\./version (number)\./' \
-e 's/[1-9]*\.[0-9]*\.[0-9]*[^)]*/version (number)/' \
JUnit-TestH5Pfaplhdfs.ext > JUnit-TestH5Pfaplhdfs.out
if diff JUnit-TestH5Pfaplhdfs.out JUnit-TestH5Pfaplhdfs.txt > /dev/null; then
echo " PASSED JUnit-TestH5Pfaplhdfs"
else
echo "**FAILED** JUnit-TestH5Pfaplhdfs"
echo " Expected result differs from actual result"
nerrors="`expr $nerrors + 1`"
test yes = "$verbose" && $DIFF JUnit-TestH5Pfaplhdfs.txt JUnit-TestH5Pfaplhdfs.out |sed 's/^/ /'
fi
fi
# Clean up temporary files/directories

View File

@@ -0,0 +1,9 @@
JUnit version 4.11
.testH5Pget_fapl_hdfs_invalid_fapl_id
.testH5Pget_fapl_hdfs_fapl_id_of_wrong_driver_type
.testHDFS_fapl
Time: XXXX
OK (3 tests)

View File

@@ -0,0 +1,10 @@
JUnit version 4.11
.testH5Pset_fapl_ros3_specified
.testH5Pset_fapl_ros3
.testH5Pget_fapl_ros3_invalid_fapl_id
.testH5Pget_fapl_ros3_fapl_id_of_wrong_driver_type
Time: XXXX
OK (4 tests)

View File

@@ -227,11 +227,14 @@ set (H5FD_SOURCES
${HDF5_SRC_DIR}/H5FDcore.c
${HDF5_SRC_DIR}/H5FDdirect.c
${HDF5_SRC_DIR}/H5FDfamily.c
${HDF5_SRC_DIR}/H5FDhdfs.c
${HDF5_SRC_DIR}/H5FDint.c
${HDF5_SRC_DIR}/H5FDlog.c
${HDF5_SRC_DIR}/H5FDmpi.c
${HDF5_SRC_DIR}/H5FDmpio.c
${HDF5_SRC_DIR}/H5FDmulti.c
${HDF5_SRC_DIR}/H5FDros3.c
${HDF5_SRC_DIR}/H5FDs3comms.c
${HDF5_SRC_DIR}/H5FDsec2.c
${HDF5_SRC_DIR}/H5FDspace.c
${HDF5_SRC_DIR}/H5FDstdio.c
@@ -243,11 +246,14 @@ set (H5FD_HDRS
${HDF5_SRC_DIR}/H5FDcore.h
${HDF5_SRC_DIR}/H5FDdirect.h
${HDF5_SRC_DIR}/H5FDfamily.h
${HDF5_SRC_DIR}/H5FDhdfs.h
${HDF5_SRC_DIR}/H5FDlog.h
${HDF5_SRC_DIR}/H5FDmpi.h
${HDF5_SRC_DIR}/H5FDmpio.h
${HDF5_SRC_DIR}/H5FDmulti.h
${HDF5_SRC_DIR}/H5FDpublic.h
${HDF5_SRC_DIR}/H5FDros3.h
${HDF5_SRC_DIR}/H5FDs3comms.h
${HDF5_SRC_DIR}/H5FDsec2.h
${HDF5_SRC_DIR}/H5FDstdio.h
${HDF5_SRC_DIR}/H5FDwindows.h
@@ -1142,6 +1148,7 @@ if (BUILD_SHARED_LIBS)
add_library (${HDF5_LIBSH_TARGET} SHARED ${common_SRCS} ${shared_gen_SRCS} ${H5_PUBLIC_HEADERS} ${H5_PRIVATE_HEADERS} ${H5_GENERATED_HEADERS})
target_include_directories (${HDF5_LIBSH_TARGET}
PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
PUBLIC "$<$<BOOL:${HDF5_ENABLE_HDFS}>:${HDFS_INCLUDE_DIR}>"
INTERFACE "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/include>"
)
target_compile_definitions(${HDF5_LIBSH_TARGET}

src/H5FDhdfs.c (new file, 2070 lines)

File diff suppressed because it is too large.

src/H5FDhdfs.h (new file, 122 lines)

View File

@@ -0,0 +1,122 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Read-Only HDFS Virtual File Driver (VFD) *
* Copyright (c) 2018, The HDF Group. *
* *
* All rights reserved. *
* *
* NOTICE: *
* All information contained herein is, and remains, the property of The HDF *
* Group. The intellectual and technical concepts contained herein are *
* proprietary to The HDF Group. Dissemination of this information or *
* reproduction of this material is strictly forbidden unless prior written *
* permission is obtained from The HDF Group. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Programmer: Jacob Smith
* 2018-04-23
*
* Purpose: The public header file for the hdfs driver.
*/
#ifndef H5FDhdfs_H
#define H5FDhdfs_H
#define H5FD_HDFS (H5FD_hdfs_init())
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************
*
* Structure: H5FD_hdfs_fapl_t
*
* Purpose:
*
* H5FD_hdfs_fapl_t is a public structure that is used to pass
* configuration information to the appropriate HDFS VFD via the FAPL.
* A pointer to an instance of this structure is a parameter to
* H5Pset_fapl_hdfs() and H5Pget_fapl_hdfs().
*
*
*
* `version` (int32_t)
*
* Version number of the `H5FD_hdfs_fapl_t` structure. Any instance passed
* to the above calls must have a recognized version number, or an error
* will be flagged.
*
* This field should be set to `H5FD__CURR_HDFS_FAPL_T_VERSION`.
*
* `namenode_name` (const char[])
*
* Name of "Name Node" to access as the HDFS server.
*
* Must not be longer than `H5FD__HDFS_NODE_NAME_SPACE`.
*
* TBD: Can be NULL.
*
* `namenode_port` (int32_t) TBD
*
* Port number to use to connect with Name Node.
*
* TBD: If 0, uses a default port.
*
* `kerberos_ticket_cache` (const char[])
*
* Path to the location of the Kerberos authentication cache.
*
* Must not be longer than `H5FD__HDFS_KERB_CACHE_PATH_SPACE`.
*
* TBD: Can be NULL.
*
* `user_name` (const char[])
*
* Username to use when accessing file.
*
* Must not be longer than `H5FD__HDFS_USER_NAME_SPACE`.
*
* TBD: Can be NULL.
*
* `stream_buffer_size` (int32_t)
*
* Size (in bytes) of the file read stream buffer.
*
* TBD: If -1, relies on a default value.
*
*
*
* Programmer: Jacob Smith
* 2018-04-23
*
* Changes: None
*
****************************************************************************/
#define H5FD__CURR_HDFS_FAPL_T_VERSION 1
#define H5FD__HDFS_NODE_NAME_SPACE 128
#define H5FD__HDFS_USER_NAME_SPACE 128
#define H5FD__HDFS_KERB_CACHE_PATH_SPACE 128
typedef struct H5FD_hdfs_fapl_t {
int32_t version;
char namenode_name[H5FD__HDFS_NODE_NAME_SPACE + 1];
int32_t namenode_port;
char user_name[H5FD__HDFS_USER_NAME_SPACE + 1];
char kerberos_ticket_cache[H5FD__HDFS_KERB_CACHE_PATH_SPACE + 1];
int32_t stream_buffer_size;
} H5FD_hdfs_fapl_t;
H5_DLL hid_t H5FD_hdfs_init(void);
H5_DLL herr_t H5Pget_fapl_hdfs(hid_t fapl_id, H5FD_hdfs_fapl_t *fa_out);
H5_DLL herr_t H5Pset_fapl_hdfs(hid_t fapl_id, H5FD_hdfs_fapl_t *fa);
#ifdef __cplusplus
}
#endif
#endif /* ifndef H5FDhdfs_H */
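As a usage sketch of the structure and calls declared above (the NameNode name, port, user, cache path, and buffer size mirror the placeholder values used in TestH5Pfaplhdfs.java; the file path is hypothetical and error handling is largely elided):

#include <string.h>
#include "hdf5.h"
#include "H5FDhdfs.h"

int main(void)
{
    H5FD_hdfs_fapl_t fa;
    hid_t            fapl_id, file_id;

    memset(&fa, 0, sizeof(fa));
    fa.version = H5FD__CURR_HDFS_FAPL_T_VERSION;
    strncpy(fa.namenode_name, "blues", H5FD__HDFS_NODE_NAME_SPACE);
    fa.namenode_port = 12345;
    strncpy(fa.user_name, "sparticus", H5FD__HDFS_USER_NAME_SPACE);
    strncpy(fa.kerberos_ticket_cache, "/dev/null", H5FD__HDFS_KERB_CACHE_PATH_SPACE);
    fa.stream_buffer_size = 1024;   /* bytes in the file read stream buffer */

    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    if (H5Pset_fapl_hdfs(fapl_id, &fa) < 0)
        return 1;
    /* the driver is read-only, so open with H5F_ACC_RDONLY */
    file_id = H5Fopen("/user/sparticus/data.h5", H5F_ACC_RDONLY, fapl_id);

    /* ... read datasets ... */

    H5Fclose(file_id);
    H5Pclose(fapl_id);
    return 0;
}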

src/H5FDros3.c (new file, 1847 lines)

File diff suppressed because it is too large.

src/H5FDros3.h (new file, 105 lines)

View File

@@ -0,0 +1,105 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Read-Only S3 Virtual File Driver (VFD)
*
* Programmer: John Mainzer
* 2017-10-10
*
* Purpose: The public header file for the ros3 driver.
*/
#ifndef H5FDros3_H
#define H5FDros3_H
#define H5FD_ROS3 (H5FD_ros3_init())
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************
*
* Structure: H5FD_ros3_fapl_t
*
* Purpose:
*
* H5FD_ros3_fapl_t is a public structure that is used to pass S3
* authentication data to the appropriate S3 VFD via the FAPL. A pointer
* to an instance of this structure is a parameter to H5Pset_fapl_ros3()
* and H5Pget_fapl_ros3().
*
*
*
* `version` (int32_t)
*
* Version number of the H5FD_ros3_fapl_t structure. Any instance passed
* to the above calls must have a recognized version number, or an error
* will be flagged.
*
* This field should be set to H5FD__CURR_ROS3_FAPL_T_VERSION.
*
* `authenticate` (hbool_t)
*
* Flag, TRUE or FALSE, indicating whether requests are to be
* authenticated with the AWS4 algorithm.
* If TRUE, `aws_region`, `secret_id`, and `secret_key` must be populated.
* If FALSE, those three components are unused.
*
* `aws_region` (char[])
*
* String: name of the AWS "region" of the host, e.g. "us-east-1".
*
* `secret_id` (char[])
*
* String: "Access ID" for the resource.
*
* `secret_key` (char[])
*
* String: "Secret Access Key" associated with the ID and resource.
*
*
*
* Programmer: John Mainzer
*
* Changes:
*
* - Add documentation of fields (except `version`)
* --- Jacob Smith 2017-12-04
*
****************************************************************************/
#define H5FD__CURR_ROS3_FAPL_T_VERSION 1
#define H5FD__ROS3_MAX_REGION_LEN 32
#define H5FD__ROS3_MAX_SECRET_ID_LEN 128
#define H5FD__ROS3_MAX_SECRET_KEY_LEN 128
typedef struct H5FD_ros3_fapl_t {
int32_t version;
hbool_t authenticate;
char aws_region[H5FD__ROS3_MAX_REGION_LEN + 1];
char secret_id[H5FD__ROS3_MAX_SECRET_ID_LEN + 1];
char secret_key[H5FD__ROS3_MAX_SECRET_KEY_LEN + 1];
} H5FD_ros3_fapl_t;
H5_DLL hid_t H5FD_ros3_init(void);
H5_DLL herr_t H5Pget_fapl_ros3(hid_t fapl_id, H5FD_ros3_fapl_t * fa_out);
H5_DLL herr_t H5Pset_fapl_ros3(hid_t fapl_id, H5FD_ros3_fapl_t * fa);
#ifdef __cplusplus
}
#endif
#endif /* ifndef H5FDros3_H */
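As a usage sketch of the structure and calls declared above (the region, access ID, and key mirror the placeholder values in TestH5Pfapls3.java; the bucket URL is hypothetical and error handling is largely elided):

#include <string.h>
#include "hdf5.h"
#include "H5FDros3.h"

int main(void)
{
    H5FD_ros3_fapl_t fa;
    hid_t            fapl_id, file_id;

    memset(&fa, 0, sizeof(fa));
    fa.version      = H5FD__CURR_ROS3_FAPL_T_VERSION;
    fa.authenticate = 1;   /* sign requests with the AWS4 algorithm */
    strncpy(fa.aws_region, "us-east-1",     H5FD__ROS3_MAX_REGION_LEN);
    strncpy(fa.secret_id,  "my_access_id",  H5FD__ROS3_MAX_SECRET_ID_LEN);
    strncpy(fa.secret_key, "my_access_key", H5FD__ROS3_MAX_SECRET_KEY_LEN);

    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    if (H5Pset_fapl_ros3(fapl_id, &fa) < 0)
        return 1;
    /* read-only driver: open with H5F_ACC_RDONLY */
    file_id = H5Fopen("https://mybucket.s3.amazonaws.com/somefile.h5",
                      H5F_ACC_RDONLY, fapl_id);

    /* ... read datasets ... */

    H5Fclose(file_id);
    H5Pclose(fapl_id);
    return 0;
}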

src/H5FDs3comms.c (new file, 3775 lines)

File diff suppressed because it is too large.

src/H5FDs3comms.h (new file, 633 lines)

View File

@@ -0,0 +1,633 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
* Read-Only S3 Virtual File Driver (VFD)
*
* This is the header for the S3 Communications module
*
* ***NOT A FILE DRIVER***
*
* Purpose:
*
* - Provide structures and functions related to communicating with
* Amazon S3 (Simple Storage Service).
* - Abstract away the REST API (HTTP,
* networked communications) behind a series of uniform function calls.
* - Handle AWS4 authentication, if appropriate.
* - Fail predictably in event of errors.
* - Eventually, support more S3 operations, such as creating, writing to,
* and removing Objects remotely.
*
* translates:
* `read(some_file, bytes_offset, bytes_length, &dest_buffer);`
* to:
* ```
* GET myfile HTTP/1.1
* Host: somewhere.me
* Range: bytes=4096-5115
* ```
* and places received bytes from HTTP response...
* ```
* HTTP/1.1 206 Partial-Content
* Content-Range: 4096-5115/63239
*
* <bytes>
* ```
* ...in destination buffer.
*
* TODO: put documentation in a consistent place and point to it from here.
*
* Programmer: Jacob Smith
* 2017-11-30
*
*****************************************************************************/
#include <ctype.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#ifdef H5_HAVE_ROS3_VFD
#include <curl/curl.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/sha.h>
#endif /* ifdef H5_HAVE_ROS3_VFD */
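/* Illustrative sketch (editorial; `offset` and `len` are the read parameters
 * named in the translation above): the read()-to-HTTP translation amounts to
 * formatting an HTTP Range header from a byte offset and length. With offset
 * 4096 and length 1020, as in the example above:
 * ```
 * char range[64];
 * snprintf(range, sizeof(range), "Range: bytes=%llu-%llu",
 *          (unsigned long long)offset,
 *          (unsigned long long)(offset + len - 1));
 * ```
 * yields "Range: bytes=4096-5115".
 */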
/*****************
* PUBLIC MACROS *
*****************/
/* hexadecimal string of pre-computed sha256 checksum of the empty string
* hex(sha256sum(""))
*/
#define EMPTY_SHA256 \
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
/* string length (plus null terminator)
* example ISO8601-format string: "20170713T145903Z" (YYYYmmdd'T'HHMMSS'Z')
*/
#define ISO8601_SIZE 17
/* string length (plus null terminator)
* example RFC7231-format string: "Fri, 30 Jun 2017 20:41:55 GMT"
*/
#define RFC7231_SIZE 30
/*---------------------------------------------------------------------------
*
* Macro: ISO8601NOW()
*
* Purpose:
*
* write "YYYYmmdd'T'HHMMSS'Z'" (less single-quotes) to dest
* e.g., "20170630T204155Z"
*
* wrapper for strftime()
*
* It is left to the programmer to check the return value of
* ISO8601NOW (should equal ISO8601_SIZE - 1).
*
* Programmer: Jacob Smith
* 2017-07-??
*
*---------------------------------------------------------------------------
*/
#define ISO8601NOW(dest, now_gm) \
strftime((dest), ISO8601_SIZE, "%Y%m%dT%H%M%SZ", (now_gm))
/*---------------------------------------------------------------------------
*
* Macro: RFC7231NOW()
*
* Purpose:
*
* write "Day, dd Mmm YYYY HH:MM:SS GMT" to dest
* e.g., "Fri, 30 Jun 2017 20:41:55 GMT"
*
* wrapper for strftime()
*
* It is left to the programmer to check the return value of
* RFC7231NOW (should equal RFC7231_SIZE - 1).
*
* Programmer: Jacob Smith
* 2017-07-??
*
*---------------------------------------------------------------------------
*/
#define RFC7231NOW(dest, now_gm) \
strftime((dest), RFC7231_SIZE, "%a, %d %b %Y %H:%M:%S GMT", (now_gm))
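/* Illustrative usage sketch (editorial) for the two macros above: format the
 * current UTC time and check the documented expected return values.
 * ```
 * char       iso8601[ISO8601_SIZE];
 * char       rfc7231[RFC7231_SIZE];
 * time_t     now = time(NULL);
 * struct tm *gm  = gmtime(&now);
 * if (ISO8601NOW(iso8601, gm) != (ISO8601_SIZE - 1))
 *     puts("ISO8601NOW wrote an unexpected length");
 * if (RFC7231NOW(rfc7231, gm) != (RFC7231_SIZE - 1))
 *     puts("RFC7231NOW wrote an unexpected length");
 * ```
 */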
/* Reasonable maximum length of a credential string.
* Provided for error-checking S3COMMS_FORMAT_CREDENTIAL (below).
* 17 <- "////aws4_request\0"
* 2 < "s3" (service)
* 8 <- "YYYYmmdd" (date)
* 128 <- (access_id)
* 155 :: sum
*/
#define S3COMMS_MAX_CREDENTIAL_SIZE 155
/*---------------------------------------------------------------------------
*
* Macro: H5FD_S3COMMS_FORMAT_CREDENTIAL()
*
* Purpose:
*
* Format "S3 Credential" string from inputs, for AWS4.
*
* Wrapper for HDsnprintf().
*
* _HAS NO ERROR-CHECKING FACILITIES_
* It is left to the programmer to ensure that the return value confers success.
* e.g.,
* ```
* assert( S3COMMS_MAX_CREDENTIAL_SIZE >=
* S3COMMS_FORMAT_CREDENTIAL(...) );
* ```
*
* "<access-id>/<date>/<aws-region>/<aws-service>/aws4_request"
* assuming that `dest` has adequate space.
*
* ALL inputs must be null-terminated strings.
*
* `access` should be the user's access key ID.
* `date` must be of format "YYYYmmdd".
* `region` should be relevant AWS region, i.e. "us-east-1".
* `service` should be "s3".
*
* Programmer: Jacob Smith
* 2017-09-19
*
* Changes: None.
*
*---------------------------------------------------------------------------
*/
#define S3COMMS_FORMAT_CREDENTIAL(dest, access, iso8601_date, region, service) \
HDsnprintf((dest), S3COMMS_MAX_CREDENTIAL_SIZE, \
"%s/%s/%s/%s/aws4_request", \
(access), (iso8601_date), (region), (service))
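/* Illustrative usage sketch (editorial; the access ID and date are
 * placeholders, and HDsnprintf assumes an HDF5-internal compilation context):
 * ```
 * char credential[S3COMMS_MAX_CREDENTIAL_SIZE];
 * assert( S3COMMS_MAX_CREDENTIAL_SIZE >=
 *         S3COMMS_FORMAT_CREDENTIAL(credential, "AKIAIOSFODNN7EXAMPLE",
 *                                   "20170713", "us-east-1", "s3") );
 * ```
 * leaves `credential` holding
 * "AKIAIOSFODNN7EXAMPLE/20170713/us-east-1/s3/aws4_request".
 */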
/*********************
* PUBLIC STRUCTURES *
*********************/
/*----------------------------------------------------------------------------
*
* Structure: hrb_node_t
*
* HTTP Header Field Node
*
*
*
* Maintain an ordered (linked) list of HTTP Header fields.
*
* Provides efficient access and manipulation of a logical sequence of
* HTTP header fields, of particular use when composing an
* "S3 Canonical Request" for authentication.
*
* - The creation of a Canonical Request involves:
* - convert field names to lower case
* - sort by this lower-case name
* - convert ": " name-value separator in HTTP string to ":"
* - get sorted lowercase names without field or separator
*
* As HTTP allows header fields in any order (excepting the case of multiple
* headers with the same name), the list ordering can be optimized for Canonical
* Request creation, suggesting alphabetical order. For more expedient insertion
* and removal of elements in the list, a linked list seems preferable to a
* dynamically-expanding array. The usually-small number of entries (5 or
* fewer) makes the performance overhead of traversing the list trivial.
*
* The above requirements for creating a Canonical Request suggest a reasonable
* trade-off of speed for space, with the option to compute elements as needed
* or to have the various elements prepared and stored in the structure
* (e.g. name, value, lowername, concatenated name:value).
* The structure as currently implemented pre-computes them.
*
* At all times, the "first" node of the list should be the least,
* alphabetically. For all nodes, the `next` node should be either NULL or
* of greater alphabetical value.
*
* Each node contains its own header field information, plus a pointer to the
* next node.
*
* It is not allowed to have multiple nodes with the same _lowercase_ `name`s
* in the same list
* (i.e., name is case-insensitive for access and modification.)
*
* All data (`name`, `value`, `lowername`, and `cat`) are null-terminated
* strings allocated specifically for their node.
*
*
*
* `magic` (unsigned long)
*
* "unique" idenfier number for the structure type
*
* `name` (char *)
*
* Case-meaningful name of the HTTP field.
* Given case is how it is supplied to networking code.
* e.g., "Range"
*
* `lowername` (char *)
*
* Lowercase copy of name.
* e.g., "range"
*
* `value` (char *)
*
* Case-meaningful value of HTTP field.
* e.g., "bytes=0-9"
*
* `cat` (char *)
*
* Concatenated, null-terminated string of HTTP header line,
* as the field would appear in an HTTP request.
* e.g., "Range: bytes=0-9"
*
* `next` (hrb_node_t *)
*
* Pointer to the next node in the list, or NULL sentinel as end of list.
* Next node must have a greater `lowername` as determined by strcmp().
*
*
*
* Programmer: Jacob Smith
* 2017-09-22
*
* Changes:
*
* - Change from twin doubly-linked lists to singly-linked list.
* --- Jake Smith 2017-01-17
*
*----------------------------------------------------------------------------
*/
typedef struct hrb_node_t {
unsigned long magic;
char *name;
char *value;
char *cat;
char *lowername;
struct hrb_node_t *next;
} hrb_node_t;
#define S3COMMS_HRB_NODE_MAGIC 0x7F5757UL
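/* Illustrative sketch (editorial; `first_header` and `new_lowername` are
 * hypothetical names): finding the insertion point for a new field while
 * keeping the list sorted by `lowername`, per the invariant described above.
 * ```
 * hrb_node_t *prev = NULL;
 * hrb_node_t *node = first_header;
 * while (node != NULL && strcmp(node->lowername, new_lowername) < 0) {
 *     prev = node;
 *     node = node->next;
 * }
 * // link the new node after `prev`, or at the list head if `prev` is NULL
 * ```
 */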
/*----------------------------------------------------------------------------
*
* Structure: hrb_t
*
* HTTP Request Buffer structure
*
*
*
* Logically represent an HTTP request
*
* GET /myplace/myfile.h5 HTTP/1.1
* Host: over.rainbow.oz
* Date: Fri, 01 Dec 2017 12:35:04 CST
*
* <body>
*
* ...with fast, efficient access to and modification of primary and field
* elements.
*
* Structure for building HTTP requests while hiding much of the string
* processing required "under the hood."
*
* Information about the request target -- the first line -- and the body text,
* if any, are managed directly with this structure. All header fields, e.g.,
* "Host" and "Date" above, are created with a linked list of `hrb_node_t` and
* included in the request by a pointer to the head of the list.
*
*
*
* `magic` (unsigned long)
*
* "Magic" number confirming that this is an hrb_t structure and
* what operations are valid for it.
*
* Must be S3COMMS_HRB_MAGIC to be valid.
*
* `body` (char *) :
*
* Pointer to start of HTTP body.
*
* Can be NULL, in which case it is treated as the empty string, "".
*
* `body_len` (size_t) :
*
* Number of bytes (characters) in `body`. 0 if empty or NULL `body`.
*
* `first_header` (hrb_node_t *) :
*
* Pointer to first SORTED header node, if any.
* It is left to the programmer to ensure that this node and the
* associated list are destroyed when done.
*
* `resource` (char *) :
*
* Pointer to resource URL string, e.g., "/folder/page.xhtml".
*
* `verb` (char *) :
*
* Pointer to HTTP verb string, e.g., "GET".
*
* `version` (char *) :
*
* Pointer to HTTP version string, e.g., "HTTP/1.1".
*
*
*
* Programmer: Jacob Smith
*
*----------------------------------------------------------------------------
*/
typedef struct {
unsigned long magic;
char *body;
size_t body_len;
hrb_node_t *first_header;
char *resource;
char *verb;
char *version;
} hrb_t;
#define S3COMMS_HRB_MAGIC 0x6DCC84UL
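/* Example (illustration only): composing the logical request shown above
* with H5FD_s3comms_hrb_init_request(), declared later in this header. The
* verb, resource, and host arguments mirror the first line and "Host" field
* of the request; header nodes would be attached via `first_header`.
*/
#if 0
static herr_t
build_example_request(void)
{
    hrb_t *request = NULL;

    request = H5FD_s3comms_hrb_init_request("GET",
                                            "/myplace/myfile.h5",
                                            "over.rainbow.oz");
    if (request == NULL)
        return FAIL;

    /* ... attach an hrb_node_t list via request->first_header ... */

    return H5FD_s3comms_hrb_destroy(&request);
} /* build_example_request (example) */
#endif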
/*----------------------------------------------------------------------------
*
* Structure: parsed_url_t
*
*
* Represent a URL with easily-accessed pointers to logical elements within.
* These elements (components) are stored as null-terminated strings (or just
* NULLs). These components should be allocated for the structure, making the
* data as safe as possible from modification. If a component is NULL, it is
* either implicit in or absent from the URL.
*
* "http://mybucket.s3.amazonaws.com:8080/somefile.h5?param=value&arg=value"
* ^--^ ^-----------------------^ ^--^ ^---------^ ^-------------------^
* Scheme Host Port Resource Query/-ies
*
*
*
* `magic` (unsigned long)
*
* Structure identification and validation identifier.
* Identifies as `parsed_url_t` type.
*
* `scheme` (char *)
*
* String representing which protocol is to be expected.
* _Must_ be present.
* "http", "https", "ftp", e.g.
*
* `host` (char *)
*
* String of host, either domain name, IPv4, or IPv6 format.
* _Must_ be present.
* "over.rainbow.oz", "192.168.0.1", "[0000:0000:0000:0001]"
*
* `port` (char *)
*
* String representation of specified port. Must resolve to a valid unsigned
* integer.
* "9000", "80"
*
* `path` (char *)
*
* Path to resource on host. If not specified, assumes root "/".
* "lollipop_guild.wav", "characters/witches/white.dat"
*
* `query` (char *)
*
* Single string of all query parameters in url (if any).
* "arg1=value1&arg2=value2"
*
*
*
* Programmer: Jacob Smith
*
*----------------------------------------------------------------------------
*/
typedef struct {
unsigned long magic;
char *scheme; /* required */
char *host; /* required */
char *port;
char *path;
char *query;
} parsed_url_t;
#define S3COMMS_PARSED_URL_MAGIC 0x21D0DFUL
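/* Example (illustration only): decomposing a URL with
* H5FD_s3comms_parse_url(), declared later in this header. Components
* absent from the URL come back as NULL pointers.
*/
#if 0
static herr_t
inspect_example_url(void)
{
    parsed_url_t *purl = NULL;

    if (H5FD_s3comms_parse_url("http://over.rainbow.oz:8080/file.h5?a=1",
                               &purl) == FAIL)
        return FAIL;

    /* purl->scheme == "http", purl->host == "over.rainbow.oz",
     * purl->port == "8080", purl->path == "file.h5", purl->query == "a=1" */

    return H5FD_s3comms_free_purl(purl);
} /* inspect_example_url (example) */
#endif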
/*----------------------------------------------------------------------------
*
* Structure: s3r_t
*
*
*
* S3 request structure "handle".
*
* Holds persistent information for Amazon S3 requests.
*
* Instantiated through `H5FD_s3comms_s3r_open()`, copies data into self.
*
* Intended to be re-used for operations on a remote object.
*
* Cleaned up through `H5FD_s3comms_s3r_close()`.
*
* _DO NOT_ share a handle between threads: the curl easy handle `curlhandle`
* has undefined behavior if used to perform requests in multiple threads.
*
*
*
* `magic` (unsigned long)
*
* "magic" number identifying this structure as unique type.
* MUST equal `S3R_MAGIC` to be valid.
*
* `curlhandle` (CURL *)
*
* Pointer to the curl_easy handle generated for the request.
*
* `httpverb` (char *)
*
* Pointer to NULL-terminated string. HTTP verb,
* e.g. "GET", "HEAD", "PUT", etc.
*
* Default is NULL, resulting in a "GET" request.
*
* `purl` (parsed_url_t *)
*
* Pointer to structure holding the elements of URL for file open.
*
* e.g., "http://bucket.aws.com:8080/myfile.dat?q1=v1&q2=v2"
* parsed into...
* { scheme: "http"
* host: "bucket.aws.com"
* port: "8080"
* path: "myfile.dat"
* query: "q1=v1&q2=v2"
* }
*
* Cannot be NULL.
*
* `region` (char *)
*
* Pointer to NULL-terminated string, specifying S3 "region",
* e.g., "us-east-1".
*
* Required to authenticate.
*
* `secret_id` (char *)
*
* Pointer to NULL-terminated string for "secret" access id to S3 resource.
*
* Required to authenticate.
*
* `signing_key` (unsigned char *)
*
* Pointer to `SHA256_DIGEST_LENGTH`-long string for "re-usable" signing
* key, generated via
* `HMAC-SHA256(HMAC-SHA256(HMAC-SHA256(HMAC-SHA256("AWS4<secret_key>",
* "<yyyyMMDD"), "<aws-region>"), "<aws-service>"), "aws4_request")`
* which may be re-used for up to seven (7) days from creation.
* Computed once upon file open.
*
* Required to authenticate.
*
*
*
* Programmer: Jacob Smith
*
*----------------------------------------------------------------------------
*/
typedef struct {
unsigned long magic;
#ifdef H5_HAVE_ROS3_VFD
CURL *curlhandle;
size_t filesize;
char *httpverb;
parsed_url_t *purl;
char *region;
char *secret_id;
unsigned char *signing_key;
#endif /* ifdef H5_HAVE_ROS3_VFD */
} s3r_t;
#define S3COMMS_S3R_MAGIC 0x44d8d79UL
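/* Example (illustration only): the intended open/read/close lifecycle of an
* s3r_t handle, using the routines declared below. Passing NULL for the
* region, id, and signing key is assumed to yield an unauthenticated
* ("anonymous") request.
*/
#if 0
static herr_t
read_example_bytes(void)
{
    s3r_t *handle = NULL;
    char   buffer[16];

    handle = H5FD_s3comms_s3r_open(
            "http://bucket.aws.com:8080/myfile.dat",
            NULL, NULL, NULL); /* anonymous: no region/id/signing key */
    if (handle == NULL)
        return FAIL;

    /* read the first 16 bytes of the remote object into buffer */
    if (H5FD_s3comms_s3r_read(handle, (haddr_t)0, sizeof(buffer), buffer)
            == FAIL) {
        (void)H5FD_s3comms_s3r_close(handle);
        return FAIL;
    }

    return H5FD_s3comms_s3r_close(handle);
} /* read_example_bytes (example) */
#endif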
/*******************************************
* DECLARATION OF HTTP FIELD LIST ROUTINES *
*******************************************/
herr_t H5FD_s3comms_hrb_node_set(hrb_node_t **L,
const char *name,
const char *value);
/***********************************************
* DECLARATION OF HTTP REQUEST BUFFER ROUTINES *
***********************************************/
herr_t H5FD_s3comms_hrb_destroy(hrb_t **buf);
hrb_t * H5FD_s3comms_hrb_init_request(const char *verb,
const char *resource,
const char *host);
/*************************************
* DECLARATION OF S3REQUEST ROUTINES *
*************************************/
H5_DLL herr_t H5FD_s3comms_s3r_close(s3r_t *handle);
H5_DLL size_t H5FD_s3comms_s3r_get_filesize(s3r_t *handle);
H5_DLL s3r_t * H5FD_s3comms_s3r_open(const char url[],
const char region[],
const char id[],
const unsigned char signing_key[]);
H5_DLL herr_t H5FD_s3comms_s3r_read(s3r_t *handle,
haddr_t offset,
size_t len,
void *dest);
/*********************************
* DECLARATION OF OTHER ROUTINES *
*********************************/
H5_DLL struct tm * gmnow(void);
herr_t H5FD_s3comms_aws_canonical_request(char *canonical_request_dest,
char *signed_headers_dest,
hrb_t *http_request);
H5_DLL herr_t H5FD_s3comms_bytes_to_hex(char *dest,
const unsigned char *msg,
size_t msg_len,
hbool_t lowercase);
herr_t H5FD_s3comms_free_purl(parsed_url_t *purl);
herr_t H5FD_s3comms_HMAC_SHA256(const unsigned char *key,
size_t key_len,
const char *msg,
size_t msg_len,
char *dest);
herr_t H5FD_s3comms_load_aws_profile(const char *name,
char *key_id_out,
char *secret_access_key_out,
char *aws_region_out);
herr_t H5FD_s3comms_nlowercase(char *dest,
const char *s,
size_t len);
herr_t H5FD_s3comms_parse_url(const char *str,
parsed_url_t **purl);
herr_t H5FD_s3comms_percent_encode_char(char *repr,
const unsigned char c,
size_t *repr_len);
H5_DLL herr_t H5FD_s3comms_signing_key(unsigned char *md,
const char *secret,
const char *region,
const char *iso8601now);
herr_t H5FD_s3comms_tostringtosign(char *dest,
const char *req_str,
const char *now,
const char *region);
H5_DLL herr_t H5FD_s3comms_trim(char *dest,
char *s,
size_t s_len,
size_t *n_written);
H5_DLL herr_t H5FD_s3comms_uriencode(char *dest,
const char *s,
size_t s_len,
hbool_t encode_slash,
size_t *n_written);
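/* Example (illustration only): computing the re-usable signing key described
* for s3r_t above, then rendering it as lowercase hex for inspection. The
* secret, region, and ISO-8601 timestamp values are placeholders; the
* timestamp would normally derive from gmnow().
*/
#if 0
static herr_t
make_example_signing_key(void)
{
    unsigned char md[SHA256_DIGEST_LENGTH];
    char          hex[(SHA256_DIGEST_LENGTH * 2) + 1];

    if (H5FD_s3comms_signing_key(md, "plugh", "us-east-1",
                                 "20170713T145903Z") == FAIL)
        return FAIL;

    if (H5FD_s3comms_bytes_to_hex(hex, md, SHA256_DIGEST_LENGTH, TRUE) == FAIL)
        return FAIL;

    return SUCCEED;
} /* make_example_signing_key (example) */
#endif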

View File

@ -63,8 +63,8 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5FA.c H5FAcache.c H5FAdbg.c H5FAdblock.c H5FAdblkpage.c H5FAhdr.c \
H5FAint.c H5FAstat.c H5FAtest.c \
H5FD.c H5FDcore.c \
H5FDfamily.c H5FDint.c H5FDlog.c \
H5FDmulti.c H5FDsec2.c H5FDspace.c H5FDstdio.c H5FDtest.c \
H5FDfamily.c H5FDhdfs.c H5FDint.c H5FDlog.c H5FDs3comms.c \
H5FDmulti.c H5FDros3.c H5FDsec2.c H5FDspace.c H5FDstdio.c H5FDtest.c \
H5FL.c H5FO.c H5FS.c H5FScache.c H5FSdbg.c H5FSint.c H5FSsection.c \
H5FSstat.c H5FStest.c \
H5G.c H5Gbtree2.c H5Gcache.c \
@ -138,8 +138,8 @@ include_HEADERS = hdf5.h H5api_adpt.h H5overflow.h H5pubconf.h H5public.h H5vers
H5Cpublic.h H5Dpublic.h \
H5Epubgen.h H5Epublic.h H5ESpublic.h H5Fpublic.h \
H5FDpublic.h H5FDcore.h H5FDdirect.h \
H5FDfamily.h H5FDlog.h H5FDmpi.h H5FDmpio.h \
H5FDmulti.h H5FDsec2.h H5FDstdio.h H5FDwindows.h \
H5FDfamily.h H5FDhdfs.h H5FDlog.h H5FDmpi.h H5FDmpio.h \
H5FDmulti.h H5FDros3.h H5FDsec2.h H5FDstdio.h H5FDwindows.h \
H5Gpublic.h H5Ipublic.h H5Lpublic.h \
H5MMpublic.h H5Opublic.h H5Ppublic.h \
H5PLextern.h H5PLpublic.h \

View File

@ -40,16 +40,18 @@
#include "H5Zpublic.h" /* Data filters */
/* Predefined file drivers */
#include "H5FDcore.h" /* Files stored entirely in memory */
#include "H5FDdirect.h" /* Linux direct I/O */
#include "H5FDfamily.h" /* File families */
#include "H5FDcore.h" /* Files stored entirely in memory */
#include "H5FDdirect.h" /* Linux direct I/O */
#include "H5FDfamily.h" /* File families */
#include "H5FDhdfs.h" /* Hadoop HDFS */
#include "H5FDlog.h" /* sec2 driver with I/O logging (for debugging) */
#include "H5FDmpi.h" /* MPI-based file drivers */
#include "H5FDmulti.h" /* Usage-partitioned file family */
#include "H5FDsec2.h" /* POSIX unbuffered file I/O */
#include "H5FDstdio.h" /* Standard C buffered I/O */
#include "H5FDmpi.h" /* MPI-based file drivers */
#include "H5FDmulti.h" /* Usage-partitioned file family */
#include "H5FDros3.h" /* R/O S3 "file" I/O */
#include "H5FDsec2.h" /* POSIX unbuffered file I/O */
#include "H5FDstdio.h" /* Standard C buffered I/O */
#ifdef H5_HAVE_WINDOWS
#include "H5FDwindows.h" /* Win32 I/O */
#include "H5FDwindows.h" /* Win32 I/O */
#endif
/* Virtual object layer (VOL) connectors */

View File

@ -79,6 +79,8 @@ Parallel Filtered Dataset Writes: @PARALLEL_FILTERED_WRITES@
I/O filters (external): @EXTERNAL_FILTERS@
MPE: @MPE@
Direct VFD: @DIRECT_VFD@
(Read-Only) S3 VFD: @ROS3_VFD@
(Read-Only) HDFS VFD: @HAVE_LIBHDFS@
dmalloc: @HAVE_DMALLOC@
Packages w/ extra debug output: @INTERNAL_DEBUG_OUTPUT@
API tracing: @TRACE_API@

View File

@ -267,6 +267,9 @@ set (H5_TESTS
enc_dec_plist_cross_platform
getname
vfd
ros3
s3comms
hdfs
ntypes
dangle
dtransform

View File

@ -18,6 +18,7 @@
# included from CMakeTests.cmake
set (VFD_LIST
hdfs
sec2
stdio
core

View File

@ -59,11 +59,11 @@ TEST_PROG= testhdf5 \
stab gheap evict_on_close farray earray btree2 fheap \
pool accum hyperslab istore bittests dt_arith page_buffer \
dtypes dsets cmpd_dset filter_fail extend direct_chunk external efc \
objcopy links unlink twriteorder big mtime fillval mount \
flush1 flush2 app_ref enum set_extent ttsafe enc_dec_plist \
enc_dec_plist_cross_platform getname vfd ntypes dangle dtransform \
reserved cross_read freespace mf vds file_image unregister \
cache_logging cork swmr vol
objcopy links unlink twriteorder big mtime fillval mount flush1 \
flush2 app_ref enum set_extent ttsafe enc_dec_plist \
enc_dec_plist_cross_platform getname vfd ros3 s3comms hdfs ntypes \
dangle dtransform reserved cross_read freespace mf vds file_image \
unregister cache_logging cork swmr vol
# List programs to be built when testing here.
# error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh.
@ -145,7 +145,7 @@ ttsafe_SOURCES=ttsafe.c ttsafe_dcreate.c ttsafe_error.c ttsafe_cancel.c \
ttsafe_acreate.c
cache_image_SOURCES=cache_image.c genall5.c
VFD_LIST = sec2 stdio core core_paged split multi family
VFD_LIST = hdfs sec2 stdio core core_paged split multi family
if DIRECT_VFD_CONDITIONAL
VFD_LIST += direct
endif
@ -201,8 +201,8 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
dtransform.h5 test_filters.h5 get_file_name.h5 tstint[1-2].h5 \
unlink_chunked.h5 btree2.h5 btree2_tmp.h5 objcopy_src.h5 objcopy_dst.h5 \
objcopy_ext.dat trefer1.h5 trefer2.h5 app_ref.h5 farray.h5 farray_tmp.h5 \
earray.h5 earray_tmp.h5 efc[0-5].h5 log_vfd_out.log \
new_multi_file_v16-r.h5 new_multi_file_v16-s.h5 \
earray.h5 earray_tmp.h5 efc[0-5].h5 log_vfd_out.log log_ros3_out.log \
log_s3comms_out.log new_multi_file_v16-r.h5 new_multi_file_v16-s.h5 \
split_get_file_image_test-m.h5 split_get_file_image_test-r.h5 \
file_image_core_test.h5.copy unregister_filter_1.h5 unregister_filter_2.h5 \
vds_virt.h5 vds_dapl.h5 vds_src_[0-1].h5 \

1836
test/hdfs.c Normal file

File diff suppressed because it is too large

2020
test/ros3.c Normal file

File diff suppressed because it is too large

2813
test/s3comms.c Normal file

File diff suppressed because it is too large

View File

@ -58,6 +58,7 @@ const char *FILENAME[] = {
"stdio_file", /*7*/
"windows_file", /*8*/
"new_multi_file_v16",/*9*/
"ro_s3_file6", /*10*/
NULL
};
@ -66,7 +67,7 @@ const char *FILENAME[] = {
#define COMPAT_BASENAME "family_v16_"
#define MULTI_COMPAT_BASENAME "multi_file_v16"
/*-------------------------------------------------------------------------
* Function: test_sec2
*
@ -178,7 +179,7 @@ error:
return -1;
} /* end test_sec2() */
/*-------------------------------------------------------------------------
* Function: test_core
*
@ -534,7 +535,7 @@ error:
return -1;
} /* end test_core() */
/*-------------------------------------------------------------------------
* Function: test_direct
*
@ -754,7 +755,7 @@ error:
#endif /*H5_HAVE_DIRECT*/
}
/*-------------------------------------------------------------------------
* Function: test_family_opens
*
@ -835,7 +836,7 @@ error:
} /* end test_family_opens() */
#pragma GCC diagnostic pop
/*-------------------------------------------------------------------------
* Function: test_family
*
@ -1017,7 +1018,7 @@ error:
return -1;
}
/*-------------------------------------------------------------------------
* Function: test_family_compat
*
@ -1129,7 +1130,7 @@ error:
} /* end test_family_compat() */
#pragma GCC diagnostic pop
/*-------------------------------------------------------------------------
* Function: test_multi_opens
*
@ -1170,7 +1171,7 @@ test_multi_opens(char *fname)
} /* end test_multi_opens() */
#pragma GCC diagnostic pop
/*-------------------------------------------------------------------------
* Function: test_multi
*
@ -1404,7 +1405,7 @@ error:
return FAIL;
} /* end test_multi() */
/*-------------------------------------------------------------------------
* Function: test_multi_compat
*
@ -1578,7 +1579,7 @@ error:
return -1;
}
/*-------------------------------------------------------------------------
* Function: test_log
*
@ -1689,7 +1690,7 @@ error:
return -1;
}
/*-------------------------------------------------------------------------
* Function: test_stdio
*
@ -1794,7 +1795,7 @@ error:
}
/*-------------------------------------------------------------------------
* Function: test_windows
*
@ -1916,7 +1917,146 @@ error:
} /* end test_windows() */
/*-------------------------------------------------------------------------
* Function: test_ros3
*
* Purpose: Tests the file handle interface for the ROS3 driver
*
* As the ROS3 driver is 1) read only, 2) requires access
* to an S3 server (minio for now), this test is quite
* different from the other tests.
*
* For now, test only fapl & flags. Extend as the
* work on the VFD continues.
*
* Return: Success: 0
* Failure: -1
*
* Programmer: John Mainzer
* 7/12/17
*
*-------------------------------------------------------------------------
*/
static herr_t
test_ros3(void)
{
hid_t fid = -1; /* file ID */
hid_t fapl_id = -1; /* file access property list ID */
hid_t fapl_id_out = -1; /* from H5Fget_access_plist */
hid_t driver_id = -1; /* ID for this VFD */
unsigned long driver_flags = 0; /* VFD feature flags */
char filename[1024]; /* filename */
void *os_file_handle = NULL; /* OS file handle */
hsize_t file_size; /* file size */
H5FD_ros3_fapl_t test_ros3_fa;
H5FD_ros3_fapl_t ros3_fa_0 =
{
/* version = */ H5FD__CURR_ROS3_FAPL_T_VERSION,
/* authenticate = */ FALSE,
/* aws_region = */ "",
/* secret_id = */ "",
/* secret_key = */ "plugh",
};
TESTING("ROS3 file driver");
/* Set property list and file name for ROS3 driver. */
if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
TEST_ERROR;
if(H5Pset_fapl_ros3(fapl_id, &ros3_fa_0) < 0)
TEST_ERROR;
/* verify that the ROS3 FAPL entry is set as expected */
if(H5Pget_fapl_ros3(fapl_id, &test_ros3_fa) < 0)
TEST_ERROR;
/* need a macro to compare instances of H5FD_ros3_fapl_t */
if((test_ros3_fa.version != ros3_fa_0.version) ||
(test_ros3_fa.authenticate != ros3_fa_0.authenticate) ||
(strcmp(test_ros3_fa.aws_region, ros3_fa_0.aws_region) != 0) ||
(strcmp(test_ros3_fa.secret_id, ros3_fa_0.secret_id) != 0) ||
(strcmp(test_ros3_fa.secret_key, ros3_fa_0.secret_key) != 0))
TEST_ERROR;
h5_fixname(FILENAME[10], fapl_id, filename, sizeof(filename));
/* Check that the VFD feature flags are correct */
if ((driver_id = H5Pget_driver(fapl_id)) < 0)
TEST_ERROR;
if (H5FDdriver_query(driver_id, &driver_flags) < 0)
TEST_ERROR;
if(!(driver_flags & H5FD_FEAT_DATA_SIEVE)) TEST_ERROR
/* Check for extra flags not accounted for above */
if(driver_flags != (H5FD_FEAT_DATA_SIEVE))
TEST_ERROR
/* can't create analogs of the following tests until the
* ROS3 driver is up and running in a minimal fashion.
* Comment them out until we get to them.
*/
#if 0
if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
TEST_ERROR;
/* Retrieve the access property list... */
if((fapl_id_out = H5Fget_access_plist(fid)) < 0)
TEST_ERROR;
/* Check that the driver is correct */
if(H5FD_ROS3 != H5Pget_driver(fapl_id_out))
TEST_ERROR;
/* ...and close the property list */
if(H5Pclose(fapl_id_out) < 0)
TEST_ERROR;
/* Check that we can get an operating-system-specific handle from
* the library.
*/
if(H5Fget_vfd_handle(fid, H5P_DEFAULT, &os_file_handle) < 0)
TEST_ERROR;
if(os_file_handle == NULL)
FAIL_PUTS_ERROR("NULL os-specific vfd/file handle was returned from H5Fget_vfd_handle");
/* There is no guarantee the size of metadata in the file is constant.
* Just try to check if it's reasonable.
*
* Currently it should be around 2 KB.
*/
if(H5Fget_filesize(fid, &file_size) < 0)
TEST_ERROR;
if(file_size < 1 * KB || file_size > 4 * KB)
FAIL_PUTS_ERROR("suspicious file size obtained from H5Fget_filesize");
/* Close and delete the file */
if(H5Fclose(fid) < 0)
TEST_ERROR;
h5_delete_test_file(FILENAME[10], fapl_id);
/* Close the fapl */
if(H5Pclose(fapl_id) < 0)
TEST_ERROR;
#endif
PASSED();
return 0;
error:
H5E_BEGIN_TRY {
H5Pclose(fapl_id);
H5Pclose(fapl_id_out);
H5Fclose(fid);
} H5E_END_TRY;
return -1;
} /* end test_ros3() */
/*-------------------------------------------------------------------------
* Function: main
*
@ -1949,6 +2089,7 @@ main(void)
nerrors += test_log() < 0 ? 1 : 0;
nerrors += test_stdio() < 0 ? 1 : 0;
nerrors += test_windows() < 0 ? 1 : 0;
nerrors += test_ros3() < 0 ? 1 : 0;
if(nerrors) {
HDprintf("***** %d Virtual File Driver TEST%s FAILED! *****\n",

View File

@ -9,4 +9,11 @@ add_subdirectory (src)
#-- Add the tests
if (BUILD_TESTING)
add_subdirectory (test)
# --------------------------------------------------------------------
# If S3 or HDFS enabled, then we need to test the tools library
# --------------------------------------------------------------------
if (HDF5_ENABLE_ROS3_VFD OR HDF5_ENABLE_HDFS)
add_subdirectory (libtest)
endif ()
endif ()

View File

@ -21,6 +21,10 @@
#include "H5private.h"
#include "h5trav.h"
#ifdef H5_HAVE_ROS3_VFD
#include "H5FDros3.h"
#endif
/* global variables */
unsigned h5tools_nCols = 80;
/* ``get_option'' variables */
@ -97,7 +101,7 @@ parallel_print(const char* format, ...)
HDva_end(ap);
}
/*-------------------------------------------------------------------------
* Function: error_msg
*
@ -122,7 +126,7 @@ error_msg(const char *fmt, ...)
HDva_end(ap);
}
/*-------------------------------------------------------------------------
* Function: warn_msg
*
@ -161,7 +165,7 @@ help_ref_msg(FILE *output)
HDfprintf(output, "see the <%s> entry in the 'HDF5 Reference Manual'.\n",h5tools_getprogname());
}
/*-------------------------------------------------------------------------
* Function: get_option
*
@ -322,7 +326,229 @@ get_option(int argc, const char **argv, const char *opts, const struct long_opti
return opt_opt;
}
/*****************************************************************************
*
* Function: parse_tuple()
*
* Purpose:
*
* Create array of pointers to strings, identified as elements in a tuple
* of arbitrary length separated by provided character.
* ("tuple" because "nple" looks strange)
*
* * Receives pointer to start of tuple sequence string, '('.
* * Attempts to separate elements by token-character `sep`.
* * If the separator character is preceded by a backslash '\',
* the backslash is deleted and the separator is included in the
* element string as any other character.
* * To end an element with a backslash, escape the backslash, e.g.
* "(myelem\\,otherelem) -> {"myelem\", "otherelem"}
* * In all other cases, a backslash appearing not as part of "\\" or
* "\<sep>" digraph will be included berbatim.
* * Last two characters in the string MUST be ")\0".
*
* * Generates a copy of the input string `start`, (src..")\0"), replacing
* separators and close-paren with null characters.
* * This string is allocated at runtime and should be freed when done.
* * Generates array of char pointers, and directs start of each element
* (each pointer) into this copy.
* * Each tuple element points to the start of its string (substring)
* and ends with a null terminator.
* * This array is allocated at runtime and should be freed when done.
* * Reallocates and expands elements array during parsing.
* * Initially allocated for 2 (plus one null entry), and grows by
* powers of 2.
* * The final 'slot' in the element array (elements[nelements], e.g.)
* always points to NULL.
* * The number of elements found and stored are passed out through pointer
* to unsigned, `nelems`.
*
* Return:
*
* FAIL If malformed--does not look like a tuple "(...)"
* or major error was encountered while parsing.
* or
* SUCCEED String looks properly formed "(...)" and no major errors.
*
* Stores number of elements through pointer `nelems`.
* Stores list of pointers to char (first char in each element
* string) through pointer `ptrs_out`.
* NOTE: `ptrs_out[nelems] == NULL` should be true.
* NOTE: list is malloc'd by function, and should be freed
* when done.
* Stores "source string" for element pointers through `cpy_out`.
* NOTE: Each element substring is null-terminated.
* NOTE: There may be extra characters after the last element
* (past its null terminator), but the string is guaranteed
* to be null-terminated.
* NOTE: `cpy_out` string is malloc'd by function,
* and should be freed when done.
*
* Programmer: Jacob Smith
* 2017-11-10
*
* Changes: None.
*
*****************************************************************************
*/
herr_t
parse_tuple(const char *start,
int sep,
char **cpy_out,
unsigned *nelems,
char ***ptrs_out)
{
char *elem_ptr = NULL;
char *dest_ptr = NULL;
unsigned elems_count = 0;
char **elems = NULL; /* more like *elems[], but compiler... */
char **elems_re = NULL; /* temporary pointer, for realloc */
char *cpy = NULL;
herr_t ret_value = SUCCEED;
unsigned init_slots = 2;
/*****************
* SANITY-CHECKS *
*****************/
/* must start with "("
*/
if (start[0] != '(') {
ret_value = FAIL;
goto done;
}
/* must end with ")"
*/
while (start[elems_count] != '\0') {
elems_count++;
}
if (start[elems_count - 1] != ')') {
ret_value = FAIL;
goto done;
}
elems_count = 0;
/***********
* PREPARE *
***********/
/* create list
*/
elems = (char **)HDmalloc(sizeof(char *) * (init_slots + 1));
if (elems == NULL) { ret_value = FAIL; goto done; } /* CANTALLOC */
/* create destination string
*/
start++; /* advance past opening paren '(' */
cpy = (char *)HDmalloc(sizeof(char) * (HDstrlen(start))); /* no +1; less '(' */
if (cpy == NULL) { ret_value = FAIL; goto done; } /* CANTALLOC */
/* set pointers
*/
dest_ptr = cpy; /* start writing copy here */
elem_ptr = cpy; /* first element starts here */
elems[elems_count++] = elem_ptr; /* set first element pointer into list */
/*********
* PARSE *
*********/
while (*start != '\0') {
/* For each character in the source string...
*/
if (*start == '\\') {
/* Possibly an escape digraph.
*/
if ((*(start + 1) == '\\') ||
(*(start + 1) == sep) )
{
/* Valid escape digraph of "\\" or "\<sep>".
*/
start++; /* advance past escape char '\' */
*(dest_ptr++) = *(start++); /* Copy subsequent char */
/* and advance pointers. */
} else {
/* Not an accepted escape digraph.
* Copy backslash character.
*/
*(dest_ptr++) = *(start++);
}
} else if (*start == sep) {
/* Non-escaped separator.
* Terminate elements substring in copy, record element, advance.
* Expand elements list if appropriate.
*/
*(dest_ptr++) = 0; /* Null-terminate elem substring in copy */
/* and advance pointer. */
start++; /* Advance src pointer past separator. */
elem_ptr = dest_ptr; /* Element pointer points to start of first */
/* character after null sep in copy. */
elems[elems_count++] = elem_ptr; /* Set elem pointer in list */
/* and increment count. */
/* Expand elements list, if necessary.
*/
if (elems_count == init_slots) {
init_slots *= 2;
elems_re = (char **)HDrealloc(elems, sizeof(char *) * \
(init_slots + 1));
if (elems_re == NULL) {
/* CANTREALLOC */
ret_value = FAIL;
goto done;
}
elems = elems_re;
}
} else if (*start == ')' && *(start + 1) == '\0') {
/* Found terminal, non-escaped close-paren. Last element.
* Write null terminator to copy.
* Advance source pointer to gently break from loop.
* Required to prevent ")" from always being added to last element.
*/
start++;
} else {
/* Copy character into destination. Advance pointers.
*/
*(dest_ptr++) = *(start++);
}
}
*dest_ptr = '\0'; /* Null-terminate destination string. */
elems[elems_count] = NULL; /* Null-terminate elements list. */
/********************
* PASS BACK VALUES *
********************/
*ptrs_out = elems;
*nelems = elems_count;
*cpy_out = cpy;
done:
if (ret_value == FAIL) {
/* CLEANUP */
if (cpy) HDfree(cpy);
if (elems) HDfree(elems);
}
return ret_value;
} /* parse_tuple */
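/* Example (illustration only): the expected calling pattern for
* parse_tuple(). Both out-buffers belong to the caller afterward and must
* be freed; the element strings are placeholders.
*/
#if 0
static herr_t
demo_parse_tuple(void)
{
    char     *cpy    = NULL;
    char    **elems  = NULL;
    unsigned  nelems = 0;

    if (parse_tuple("(us-east-1,id,key)", ',', &cpy, &nelems, &elems) == FAIL)
        return FAIL;

    /* nelems == 3; elems[0] == "us-east-1", elems[1] == "id",
     * elems[2] == "key", elems[3] == NULL */

    HDfree(elems);
    HDfree(cpy);
    return SUCCEED;
} /* demo_parse_tuple (example) */
#endif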
/*-------------------------------------------------------------------------
* Function: indentation
*
@ -344,7 +570,7 @@ indentation(unsigned x)
}
}
/*-------------------------------------------------------------------------
* Function: print_version
*
@ -362,7 +588,7 @@ print_version(const char *progname)
((const char *)H5_VERS_SUBRELEASE)[0] ? "-" : "", H5_VERS_SUBRELEASE);
}
/*-------------------------------------------------------------------------
* Function: init_table
*
@ -384,7 +610,7 @@ init_table(table_t **tbl)
*tbl = table;
}
/*-------------------------------------------------------------------------
* Function: free_table
*
@ -408,7 +634,7 @@ free_table(table_t *table)
}
#ifdef H5DUMP_DEBUG
/*-------------------------------------------------------------------------
* Function: dump_table
*
@ -429,7 +655,7 @@ dump_table(char* tablename, table_t *table)
table->objs[u].displayed, table->objs[u].recorded);
}
/*-------------------------------------------------------------------------
* Function: dump_tables
*
@ -447,7 +673,7 @@ dump_tables(find_objs_t *info)
}
#endif /* H5DUMP_DEBUG */
/*-------------------------------------------------------------------------
* Function: search_obj
*
@ -470,7 +696,7 @@ search_obj(table_t *table, haddr_t objno)
return NULL;
}
/*-------------------------------------------------------------------------
* Function: find_objs_cb
*
@ -546,7 +772,7 @@ find_objs_cb(const char *name, const H5O_info_t *oinfo, const char *already_seen
return ret_value;
}
/*-------------------------------------------------------------------------
* Function: init_objs
*
@ -591,7 +817,7 @@ done:
return ret_value;
}
/*-------------------------------------------------------------------------
* Function: add_obj
*
@ -622,7 +848,7 @@ add_obj(table_t *table, haddr_t objno, const char *objname, hbool_t record)
table->objs[u].displayed = 0;
}
#ifndef H5_HAVE_TMPFILE
/*-------------------------------------------------------------------------
* Function: tmpfile
@ -841,3 +1067,266 @@ done:
return ret_value;
}
/*----------------------------------------------------------------------------
*
* Function: h5tools_populate_ros3_fapl()
*
* Purpose:
*
* Set the values of a ROS3 fapl configuration object.
*
* If the values pointer is NULL, sets fapl target `fa` to a default
* (valid, current-version, non-authenticating) fapl config.
*
* If `values` pointer is _not_ NULL, expects `values` to contain at least
* three non-null pointers to null-terminated strings, corresponding to:
* { aws_region,
* secret_id,
* secret_key,
* }
* If all three strings are empty (""), the fapl is set to the default.
* The aws_region and secret_id values must be either both empty or both
* populated. Only secret_key is allowed to be empty on its own (the empty
* string, "").
* All values are checked for overflow as defined in the ros3 vfd header
* file; if a value overruns the permitted space, FAIL is returned and the
* function aborts without resetting the fapl to the values initially
* present.
*
* Return:
*
* 0 (failure) if...
*     * Read-Only S3 VFD is not enabled.
*     * NULL fapl pointer: (NULL, {...})
*       * Warning: In all cases below, the fapl will be set as "default"
*         before the error occurs.
*     * NULL value strings: (&fa, {NULL?, NULL?, NULL?, ...})
*     * Incomplete fapl info:
*         * empty region, non-empty id, key either way
*             * (&fa, {"", "...", "?"})
*         * empty id, non-empty region, key either way
*             * (&fa, {"...", "", "?"})
*         * non-empty key and either id or region empty
*             * (&fa, {"", "", "..."})
*             * (&fa, {"", "...", "..."})
*             * (&fa, {"...", "", "..."})
*     * Any string would overflow allowed space in the fapl definition.
* or
* 1 (success)
* * Sets components in fapl_t pointer, copying strings as appropriate.
* * "Default" fapl (valid version, authenticate->False, empty strings)
* * `values` pointer is NULL
* * (&fa, NULL)
* * first three strings in `values` are empty ("")
* * (&fa, {"", "", "", ...}
* * Authenticating fapl
* * region, id, and optional key provided
* * (&fa, {"...", "...", ""})
* * (&fa, {"...", "...", "..."})
*
* Programmer: Jacob Smith
* 2017-11-13
*
* Changes: None.
*
*----------------------------------------------------------------------------
*/
int
h5tools_populate_ros3_fapl(H5FD_ros3_fapl_t *fa,
const char **values)
{
#ifndef H5_HAVE_ROS3_VFD
return 0;
#else
int show_progress = 0; /* set to 1 for debugging */
int ret_value = 1; /* 1 for success, 0 for failure */
/* e.g.? if (!populate()) { then failed } */
if (show_progress) {
HDprintf("called h5tools_populate_ros3_fapl\n");
}
if (fa == NULL) {
if (show_progress) {
HDprintf(" ERROR: null pointer to fapl_t\n");
}
ret_value = 0;
goto done;
}
if (show_progress) {
HDprintf(" preset fapl with default values\n");
}
fa->version = H5FD__CURR_ROS3_FAPL_T_VERSION;
fa->authenticate = FALSE;
*(fa->aws_region) = '\0';
*(fa->secret_id) = '\0';
*(fa->secret_key) = '\0';
/* sanity-check supplied values
*/
if (values != NULL) {
if (values[0] == NULL) {
if (show_progress) {
HDprintf(" ERROR: aws_region value cannot be NULL\n");
}
ret_value = 0;
goto done;
}
if (values[1] == NULL) {
if (show_progress) {
HDprintf(" ERROR: secret_id value cannot be NULL\n");
}
ret_value = 0;
goto done;
}
if (values[2] == NULL) {
if (show_progress) {
HDprintf(" ERROR: secret_key value cannot be NULL\n");
}
ret_value = 0;
goto done;
}
/* if region and ID are supplied (key optional), write to fapl...
* fail if value would overflow
*/
if (*values[0] != '\0' &&
*values[1] != '\0')
{
if (HDstrlen(values[0]) > H5FD__ROS3_MAX_REGION_LEN) {
if (show_progress) {
HDprintf(" ERROR: aws_region value too long\n");
}
ret_value = 0;
goto done;
}
HDmemcpy(fa->aws_region, values[0],
(HDstrlen(values[0]) + 1));
if (show_progress) {
HDprintf(" aws_region set\n");
}
if (HDstrlen(values[1]) > H5FD__ROS3_MAX_SECRET_ID_LEN) {
if (show_progress) {
HDprintf(" ERROR: secret_id value too long\n");
}
ret_value = 0;
goto done;
}
HDmemcpy(fa->secret_id,
values[1],
(HDstrlen(values[1]) + 1));
if (show_progress) {
HDprintf(" secret_id set\n");
}
if (HDstrlen(values[2]) > H5FD__ROS3_MAX_SECRET_KEY_LEN) {
if (show_progress) {
HDprintf(" ERROR: secret_key value too long\n");
}
ret_value = 0;
goto done;
}
HDmemcpy(fa->secret_key,
values[2],
(HDstrlen(values[2]) + 1));
if (show_progress) {
HDprintf(" secret_key set\n");
}
fa->authenticate = TRUE;
if (show_progress) {
HDprintf(" set to authenticate\n");
}
} else if (*values[0] != '\0' ||
*values[1] != '\0' ||
*values[2] != '\0')
{
if (show_progress) {
HDprintf(
" ERROR: invalid assortment of empty/non-empty values\n"
);
}
ret_value = 0;
goto done;
}
} /* values != NULL */
done:
return ret_value;
#endif /* H5_HAVE_ROS3_VFD */
} /* h5tools_populate_ros3_fapl */
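/* Example (illustration only): the two supported calling patterns for
* h5tools_populate_ros3_fapl() -- default/anonymous and authenticating.
* The credential strings are placeholders.
*/
#if 0
static int
demo_populate_ros3_fapl(void)
{
    H5FD_ros3_fapl_t fa;
    const char      *cred[3] = { "us-east-1", "my-access-id", "my-secret-key" };

    if (0 == h5tools_populate_ros3_fapl(&fa, NULL)) /* default config */
        return 0;
    if (0 == h5tools_populate_ros3_fapl(&fa, cred)) /* authenticating */
        return 0;
    return 1;
} /* demo_populate_ros3_fapl (example) */
#endif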
/*-----------------------------------------------------------------------------
*
* Function: h5tools_set_configured_fapl
*
* Purpose: prepare fapl_id with the given property list, according to
* VFD prototype.
*
* Return: 0 on failure, 1 on success
*
* Programmer: Jacob Smith
* 2018-05-21
*
* Changes: None.
*
*-----------------------------------------------------------------------------
*/
int
h5tools_set_configured_fapl(hid_t fapl_id,
const char vfd_name[],
void *fapl_t_ptr)
{
int ret_value = 1;
if (fapl_id < 0) {
return 0;
}
if (!strcmp("", vfd_name)) {
goto done;
#ifdef H5_HAVE_ROS3_VFD
} else if (!strcmp("ros3", vfd_name)) {
if ((fapl_id == H5P_DEFAULT) ||
(fapl_t_ptr == NULL) ||
(FAIL == H5Pset_fapl_ros3(
fapl_id,
(H5FD_ros3_fapl_t *)fapl_t_ptr)))
{
ret_value = 0;
goto done;
}
#endif /* H5_HAVE_ROS3_VFD */
#ifdef H5_HAVE_LIBHDFS
} else if (!strcmp("hdfs", vfd_name)) {
if ((fapl_id == H5P_DEFAULT) ||
(fapl_t_ptr == NULL) ||
(FAIL == H5Pset_fapl_hdfs(
fapl_id,
(H5FD_hdfs_fapl_t *)fapl_t_ptr)))
{
ret_value = 0;
goto done;
}
#endif /* H5_HAVE_LIBHDFS */
} else {
ret_value = 0; /* unrecognized fapl type "name" */
}
done:
return ret_value;
} /* h5tools_set_configured_fapl() */
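/* Example (illustration only): pairing h5tools_set_configured_fapl() with a
* freshly-created property list, as the tools below do for "ros3" / "hdfs".
* The ros3 configuration pointer is assumed to be populated by the caller.
*/
#if 0
static hid_t
demo_configured_fapl(H5FD_ros3_fapl_t *ros3_fa_ptr)
{
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);

    if (fapl_id < 0)
        return -1;
    if (0 == h5tools_set_configured_fapl(fapl_id, "ros3", (void *)ros3_fa_ptr)) {
        (void)H5Pclose(fapl_id);
        return -1;
    }
    return fapl_id;
} /* demo_configured_fapl (example) */
#endif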

View File

@ -123,6 +123,11 @@ H5TOOLS_DLLVAR unsigned h5tools_nCols; /*max number of columns for
H5TOOLS_DLL void indentation(unsigned);
H5TOOLS_DLL void print_version(const char *progname);
H5TOOLS_DLL void parallel_print(const char* format, ... );
H5TOOLS_DLL herr_t parse_tuple(const char *start,
int sep,
char **cpy_out,
unsigned *nelems,
char ***ptrs_out);
H5TOOLS_DLL void error_msg(const char *fmt, ...);
H5TOOLS_DLL void warn_msg(const char *fmt, ...);
H5TOOLS_DLL void help_ref_msg(FILE *output);
@ -174,6 +179,11 @@ H5TOOLS_DLL void h5tools_setprogname(const char*progname);
H5TOOLS_DLL int h5tools_getstatus(void);
H5TOOLS_DLL void h5tools_setstatus(int d_status);
H5TOOLS_DLL int h5tools_getenv_update_hyperslab_bufsize(void);
H5TOOLS_DLL int h5tools_set_configured_fapl(hid_t fapl_id,
const char vfd_name[],
void *fapl_t_ptr);
H5TOOLS_DLL int h5tools_populate_ros3_fapl(H5FD_ros3_fapl_t *fa,
const char **values);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,21 @@
cmake_minimum_required (VERSION 3.10)
project (HDF5_TOOLS_LIBTEST C)
#-----------------------------------------------------------------------------
# Add the h5tools_utils test executables
#-----------------------------------------------------------------------------
add_executable (h5tools_test_utils ${HDF5_TOOLS_LIBTEST_SOURCE_DIR}/h5tools_test_utils.c)
target_include_directories(h5tools_test_utils PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
TARGET_C_PROPERTIES (h5tools_test_utils STATIC)
target_link_libraries (h5tools_test_utils PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (h5tools_test_utils PROPERTIES FOLDER tools)
if (BUILD_SHARED_LIBS)
add_executable (h5tools_test_utils-shared ${HDF5_TOOLS_LIBTEST_SOURCE_DIR}/h5tools_test_utils.c)
target_include_directories(h5tools_test_utils-shared PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
TARGET_C_PROPERTIES (h5tools_test_utils-shared SHARED)
target_link_libraries (h5tools_test_utils-shared PRIVATE ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
set_target_properties (h5tools_test_utils-shared PROPERTIES FOLDER tools)
endif ()
include (CMakeTests.cmake)

View File

@ -0,0 +1,49 @@
#
# Copyright by The HDF Group.
# All rights reserved.
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
# the COPYING file, which can be found at the root of the source code
# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
# If you do not have access to either file, you may request a copy from
# help@hdfgroup.org.
#
##############################################################################
##############################################################################
### T E S T I N G ###
##############################################################################
##############################################################################
##############################################################################
##############################################################################
### T H E T E S T S M A C R O S ###
##############################################################################
##############################################################################
macro (ADD_H5_TEST resultfile resultcode)
add_test (
NAME H5LIBTEST-${resultfile}-clear-objects
COMMAND ${CMAKE_COMMAND}
-E remove
${resultfile}.out
${resultfile}.out.err
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5LIBTEST-${resultfile}-clear-objects PROPERTIES DEPENDS ${last_test})
endif ()
add_test (NAME H5LIBTEST-${resultfile} COMMAND $<TARGET_FILE:h5tools_test_utils> ${ARGN})
if (NOT "${resultcode}" STREQUAL "0")
set_tests_properties (H5LIBTEST-${resultfile} PROPERTIES WILL_FAIL "true")
endif ()
set_tests_properties (H5LIBTEST-${resultfile} PROPERTIES DEPENDS H5LIBTEST-${resultfile}-clear-objects)
endmacro ()
##############################################################################
##############################################################################
### T H E T E S T S ###
##############################################################################
##############################################################################
ADD_H5_TEST (h5tools_utils-default 0)

34
tools/libtest/Makefile.am Normal file
View File

@ -0,0 +1,34 @@
#
# Read-Only S3 Virtual File Driver (VFD)
# Copyright (c) 2017-2018, The HDF Group.
#
# All rights reserved.
#
# NOTICE:
# All information contained herein is, and remains, the property of The HDF
# Group. The intellectual and technical concepts contained herein are
# proprietary to The HDF Group. Dissemination of this information or
# reproduction of this material is strictly forbidden unless prior written
# permission is obtained from The HDF Group.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
#
# HDF5 Library Makefile(.in)
#
include $(top_srcdir)/config/commence.am
# Include src and tools/lib directories
AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib
# All programs depend on the hdf5 and h5tools libraries
LDADD=$(LIBH5TOOLS) $(LIBHDF5)
# main target
bin_PROGRAMS=h5tools_test_utils
# check_PROGRAMS=$(TEST_PROG)
include $(top_srcdir)/config/conclude.am

File diff suppressed because it is too large

View File

@ -24,6 +24,23 @@ static int doxml = 0;
static int useschema = 1;
static const char *xml_dtd_uri = NULL;
static H5FD_ros3_fapl_t ros3_fa = {
1, /* version */
false, /* authenticate */
"", /* aws region */
"", /* access key id */
"", /* secret access key */
};
static H5FD_hdfs_fapl_t hdfs_fa = {
1, /* fapl version */
"localhost", /* namenode name */
0, /* namenode port */
"", /* kerberos ticket cache */
"", /* user name */
2048, /* stream buffer size */
};
/* module-scoped variables for XML option */
#define DEFAULT_XSD "http://www.hdfgroup.org/HDF5/XML/schema/HDF5-File.xsd"
#define DEFAULT_DTD "http://www.hdfgroup.org/HDF5/XML/DTD/HDF5-File.dtd"
@ -188,6 +205,8 @@ static struct long_options l_opts[] = {
{ "any_path", require_arg, 'N' },
{ "vds-view-first-missing", no_arg, 'v' },
{ "vds-gap-size", require_arg, 'G' },
{ "s3-cred", require_arg, '$' },
{ "hdfs-attrs", require_arg, '#' },
{ NULL, 0, '\0' }
};
@ -241,6 +260,16 @@ usage(const char *prog)
PRINTVALSTREAM(rawoutstream, " -b B, --binary=B Binary file output, of form B\n");
PRINTVALSTREAM(rawoutstream, " -O F, --ddl=F Output ddl text into file F\n");
PRINTVALSTREAM(rawoutstream, " Use blank(empty) filename F to suppress ddl display\n");
PRINTVALSTREAM(rawoutstream, " --s3-cred=<cred> Supply S3 authentication information to \"ros3\" vfd.\n");
PRINTVALSTREAM(rawoutstream, " <cred> :: \"(<aws-region>,<access-id>,<access-key>)\"\n");
PRINTVALSTREAM(rawoutstream, " If absent or <cred> -> \"(,,)\", no authentication.\n");
PRINTVALSTREAM(rawoutstream, " Has no effect is filedriver is not `ros3'.\n");
PRINTVALSTREAM(rawoutstream, " --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.\n");
PRINTVALSTREAM(rawoutstream, " For use with \"--filedriver=hdfs\"\n");
PRINTVALSTREAM(rawoutstream, " <attrs> :: (<namenode name>,<namenode port>,\n");
PRINTVALSTREAM(rawoutstream, " <kerberos cache path>,<username>,\n");
PRINTVALSTREAM(rawoutstream, " <buffer size>)\n");
PRINTVALSTREAM(rawoutstream, " Any absent attribute will use a default value.\n");
PRINTVALSTREAM(rawoutstream, "--------------- Object Options ---------------\n");
PRINTVALSTREAM(rawoutstream, " -a P, --attribute=P Print the specified attribute\n");
PRINTVALSTREAM(rawoutstream, " If an attribute name contains a slash (/), escape the\n");
@ -1282,6 +1311,126 @@ end_collect:
hand = NULL;
h5tools_setstatus(EXIT_SUCCESS);
goto done;
case '$':
#ifndef H5_HAVE_ROS3_VFD
error_msg("Read-Only S3 VFD not enabled.\n");
h5tools_setstatus(EXIT_FAILURE);
goto done;
#else
/* s3 credential */
{
char **s3_cred = NULL;
char *s3_cred_string = NULL;
const char *ccred[3];
unsigned nelems = 0;
if ( FAIL ==
parse_tuple(opt_arg, ',',
&s3_cred_string, &nelems, &s3_cred))
{
error_msg("unable to parse malformed s3 credentials\n");
usage(h5tools_getprogname());
free_handler(hand, argc);
hand= NULL;
h5tools_setstatus(EXIT_FAILURE);
goto done;
}
if (nelems != 3) {
error_msg("s3 credentials expects 3 elements\n");
usage(h5tools_getprogname());
free_handler(hand, argc);
hand= NULL;
h5tools_setstatus(EXIT_FAILURE);
goto done;
}
ccred[0] = (const char *)s3_cred[0];
ccred[1] = (const char *)s3_cred[1];
ccred[2] = (const char *)s3_cred[2];
if (0 == h5tools_populate_ros3_fapl(&ros3_fa, ccred)) {
error_msg("Invalid S3 credentials\n");
usage(h5tools_getprogname());
free_handler(hand, argc);
hand= NULL;
h5tools_setstatus(EXIT_FAILURE);
goto done;
}
HDfree(s3_cred);
HDfree(s3_cred_string);
} /* s3 credential block */
break;
#endif /* H5_HAVE_ROS3_VFD */
case '#':
#ifndef H5_HAVE_LIBHDFS
error_msg("HDFS VFD is not enabled.\n");
goto error;
#else
{
/* read hdfs properties tuple and store values in `hdfs_fa`
*/
unsigned nelems = 0;
char *props_src = NULL;
char **props = NULL;
unsigned long k = 0;
if (FAIL == parse_tuple(
(const char *)opt_arg,
',',
&props_src,
&nelems,
&props))
{
error_msg("unable to parse hdfs properties tuple\n");
goto error;
}
/* sanity-check tuple count
*/
if (nelems != 5) {
h5tools_setstatus(EXIT_FAILURE);
goto error;
}
/* Populate fapl configuration structure with given
* properties.
* WARNING: No error-checking is done on length of input
* strings... Silent overflow is possible, albeit
* unlikely.
*/
if (HDstrncmp(props[0], "", 1)) {
    /* copy length includes the null terminator (+1) */
    HDstrncpy(hdfs_fa.namenode_name,
              (const char *)props[0],
              HDstrlen(props[0]) + 1);
}
if (HDstrncmp(props[1], "", 1)) {
    errno = 0; /* clear errno before testing for ERANGE */
    k = HDstrtoul((const char *)props[1], NULL, 0);
    if (errno == ERANGE) {
        h5tools_setstatus(EXIT_FAILURE);
        goto error;
    }
    hdfs_fa.namenode_port = (int32_t)k;
}
if (HDstrncmp(props[2], "", 1)) {
    HDstrncpy(hdfs_fa.kerberos_ticket_cache,
              (const char *)props[2],
              HDstrlen(props[2]) + 1);
}
if (HDstrncmp(props[3], "", 1)) {
    HDstrncpy(hdfs_fa.user_name,
              (const char *)props[3],
              HDstrlen(props[3]) + 1);
}
if (HDstrncmp(props[4], "", 1)) {
    errno = 0; /* clear errno before testing for ERANGE */
    k = HDstrtoul((const char *)props[4], NULL, 0);
    if (errno == ERANGE) {
        h5tools_setstatus(EXIT_FAILURE);
        goto error;
    }
    hdfs_fa.stream_buffer_size = (int32_t)k;
}
HDfree(props);
HDfree(props_src);
}
#endif /* H5_HAVE_LIBHDFS */
break;
case '?':
default:
usage(h5tools_getprogname());
@ -1354,6 +1503,7 @@ main(int argc, const char *argv[])
{
hid_t fid = -1;
hid_t gid = -1;
hid_t fapl_id = H5P_DEFAULT;
H5E_auto2_t func;
H5E_auto2_t tools_func;
H5O_info_t oi;
@ -1440,10 +1590,60 @@ main(int argc, const char *argv[])
/* Initialize indexing options */
h5trav_set_index(sort_by, sort_order);
if (driver != NULL) {
void *conf_fa = NULL;
if (!strcmp(driver, "ros3")) {
#ifndef H5_HAVE_ROS3_VFD
error_msg("Read-Only S3 VFD not enabled.\n");
h5tools_setstatus(EXIT_FAILURE);
goto done;
#else
conf_fa = (void *)&ros3_fa;
#endif /* H5_HAVE_ROS3_VFD */
} else if (!HDstrcmp(driver, "hdfs")) {
#ifndef H5_HAVE_LIBHDFS
error_msg("HDFS VFD is not enabled.\n");
h5tools_setstatus(EXIT_FAILURE);
goto done;
#else
conf_fa = (void *)&hdfs_fa;
#endif /* H5_HAVE_LIBHDFS */
}
if (conf_fa != NULL) {
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
if (fapl_id < 0) {
error_msg("unable to create fapl entry\n");
h5tools_setstatus(EXIT_FAILURE);
goto done;
}
if (0 == h5tools_set_configured_fapl(
fapl_id,
driver, /* guaranteed "ros3" or "hdfs" */
conf_fa)) /* appropriate to driver */
{
error_msg("unable to set fapl\n");
h5tools_setstatus(EXIT_FAILURE);
goto done;
}
}
} /* driver defined */
while(opt_ind < argc) {
fname = HDstrdup(argv[opt_ind++]);
fid = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, driver, NULL, 0);
if (fapl_id != H5P_DEFAULT) {
fid = H5Fopen(fname, H5F_ACC_RDONLY, fapl_id);
} else {
fid = h5tools_fopen(
fname,
H5F_ACC_RDONLY,
H5P_DEFAULT,
driver,
NULL,
0);
}
if (fid < 0) {
error_msg("unable to open file \"%s\"\n", fname);
@ -1624,6 +1824,11 @@ done:
/* Free tables for objects */
table_list_free();
if (fapl_id != H5P_DEFAULT && 0 < H5Pclose(fapl_id)) {
error_msg("Can't close fapl entry\n");
h5tools_setstatus(EXIT_FAILURE);
}
if(fid >=0)
if (H5Fclose(fid) < 0)
h5tools_setstatus(EXIT_FAILURE);
@ -1645,127 +1850,7 @@ done:
H5Eset_auto2(H5E_DEFAULT, func, edata);
leave(h5tools_getstatus());
}
/*-------------------------------------------------------------------------
* Function: h5_fileaccess
*
* Purpose: Returns a file access template which is the default template
* but with a file driver set according to the constant or
* environment variable HDF5_DRIVER
*
* Return: Success: A file access property list
*
* Failure: -1
*
* Programmer: Robb Matzke
* Thursday, November 19, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
hid_t
h5_fileaccess(void)
{
static const char *multi_letters = "msbrglo";
const char *val = NULL;
const char *name;
char s[1024];
hid_t fapl = -1;
/* First use the environment variable, then the constant */
val = HDgetenv("HDF5_DRIVER");
#ifdef HDF5_DRIVER
if (!val) val = HDF5_DRIVER;
#endif
if ((fapl=H5Pcreate(H5P_FILE_ACCESS))<0) return -1;
if (!val || !*val) return fapl; /*use default*/
HDstrncpy(s, val, sizeof s);
s[sizeof(s)-1] = '\0';
if (NULL==(name=HDstrtok(s, " \t\n\r"))) return fapl;
if (!HDstrcmp(name, "sec2")) {
/* Unix read() and write() system calls */
if (H5Pset_fapl_sec2(fapl)<0) return -1;
}
else if (!HDstrcmp(name, "stdio")) {
/* Standard C fread() and fwrite() system calls */
if (H5Pset_fapl_stdio(fapl)<0) return -1;
}
else if (!HDstrcmp(name, "core")) {
/* In-core temporary file with 1MB increment */
if (H5Pset_fapl_core(fapl, 1024*1024, FALSE)<0) return -1;
}
else if (!HDstrcmp(name, "split")) {
/* Split meta data and raw data each using default driver */
if (H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT) < 0)
return -1;
}
else if (!HDstrcmp(name, "multi")) {
/* Multi-file driver, general case of the split driver */
H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
hid_t memb_fapl[H5FD_MEM_NTYPES];
const char *memb_name[H5FD_MEM_NTYPES];
char sv[H5FD_MEM_NTYPES][1024];
haddr_t memb_addr[H5FD_MEM_NTYPES];
H5FD_mem_t mt;
HDmemset(memb_map, 0, sizeof memb_map);
HDmemset(memb_fapl, 0, sizeof memb_fapl);
HDmemset(memb_name, 0, sizeof memb_name);
HDmemset(memb_addr, 0, sizeof memb_addr);
if(HDstrlen(multi_letters)==H5FD_MEM_NTYPES) {
for (mt=H5FD_MEM_DEFAULT; mt<H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t,mt)) {
memb_fapl[mt] = H5P_DEFAULT;
memb_map[mt] = mt;
sprintf(sv[mt], "%%s-%c.h5", multi_letters[mt]);
memb_name[mt] = sv[mt];
memb_addr[mt] = (haddr_t)MAX(mt - 1, 0) * (HADDR_MAX / 10);
}
}
else {
error_msg("Bad multi_letters list\n");
return FAIL;
}
if (H5Pset_fapl_multi(fapl, memb_map, memb_fapl, memb_name, memb_addr, FALSE) < 0)
return -1;
}
else if (!HDstrcmp(name, "family")) {
hsize_t fam_size = 100*1024*1024; /*100 MB*/
/* Family of files, each 1MB and using the default driver */
if ((val=HDstrtok(NULL, " \t\n\r")))
fam_size = (hsize_t)(HDstrtod(val, NULL) * 1024*1024);
if (H5Pset_fapl_family(fapl, fam_size, H5P_DEFAULT)<0)
return -1;
}
else if (!HDstrcmp(name, "log")) {
long log_flags = H5FD_LOG_LOC_IO;
/* Log file access */
if ((val = HDstrtok(NULL, " \t\n\r")))
log_flags = HDstrtol(val, NULL, 0);
if (H5Pset_fapl_log(fapl, NULL, (unsigned)log_flags, 0) < 0)
return -1;
}
else if (!HDstrcmp(name, "direct")) {
/* Substitute Direct I/O driver with sec2 driver temporarily because
* some output has sec2 driver as the standard. */
if (H5Pset_fapl_sec2(fapl)<0) return -1;
}
else {
/* Unknown driver */
return -1;
}
return fapl;
}
} /* main */
/*-------------------------------------------------------------------------
@ -1813,3 +1898,4 @@ add_prefix(char **prfx, size_t *prfx_len, const char *name)
HDstrcat(HDstrcat(*prfx, "/"), name);
} /* end add_prefix */

View File

@ -158,7 +158,7 @@ static hbool_t print_int_type(h5tools_str_t *buffer, hid_t type, int ind);
static hbool_t print_float_type(h5tools_str_t *buffer, hid_t type, int ind);
static herr_t visit_obj(hid_t file, const char *oname, iter_t *iter);
/*-------------------------------------------------------------------------
* Function: usage
*
@ -216,6 +216,15 @@ usage (void)
PRINTVALSTREAM(rawoutstream, " -V, --version Print version number and exit\n");
PRINTVALSTREAM(rawoutstream, " --vfd=DRIVER Use the specified virtual file driver\n");
PRINTVALSTREAM(rawoutstream, " -x, --hexdump Show raw data in hexadecimal format\n");
PRINTVALSTREAM(rawoutstream, " --s3-cred=C Supply S3 authentication information to \"ros3\" vfd.\n");
PRINTVALSTREAM(rawoutstream, " Accepts tuple of \"(<aws-region>,<access-id>,<access-key>)\".\n");
PRINTVALSTREAM(rawoutstream, " If absent or C->\"(,,)\", defaults to no-authentication.\n");
PRINTVALSTREAM(rawoutstream, " Has no effect if vfd flag not set to \"ros3\".\n");
PRINTVALSTREAM(rawoutstream, " --hdfs-attrs=A Supply configuration information to Hadoop VFD.\n");
PRINTVALSTREAM(rawoutstream, " Accepts tuple of (<namenode name>,<namenode port>,\n");
PRINTVALSTREAM(rawoutstream, " ...<kerberos cache path>,<username>,<buffer size>)\n");
PRINTVALSTREAM(rawoutstream, " If absent or A == '(,,,,)', all default values are used.\n");
PRINTVALSTREAM(rawoutstream, " Has no effect if vfd flag is not 'hdfs'.\n");
PRINTVALSTREAM(rawoutstream, "\n");
PRINTVALSTREAM(rawoutstream, " file/OBJECT\n");
PRINTVALSTREAM(rawoutstream, " Each object consists of an HDF5 file name optionally followed by a\n");
@ -237,7 +246,7 @@ usage (void)
PRINTVALSTREAM(rawoutstream, " Replaced by --enable-error-stack.\n");
}
/*-------------------------------------------------------------------------
* Function: print_string
@ -315,7 +324,7 @@ print_string(h5tools_str_t *buffer, const char *s, hbool_t escape_spaces)
return nprint;
}
/*-------------------------------------------------------------------------
* Function: print_obj_name
*
@ -364,7 +373,7 @@ print_obj_name(h5tools_str_t *buffer, const iter_t *iter, const char *oname,
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_native_type
*
@ -489,7 +498,7 @@ print_native_type(h5tools_str_t *buffer, hid_t type, int ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_ieee_type
*
@ -527,7 +536,7 @@ print_ieee_type(h5tools_str_t *buffer, hid_t type, int ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_precision
*
@ -619,7 +628,7 @@ print_precision(h5tools_str_t *buffer, hid_t type, int ind)
}
}
/*-------------------------------------------------------------------------
* Function: print_int_type
*
@ -693,7 +702,7 @@ print_int_type(h5tools_str_t *buffer, hid_t type, int ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_float_type
*
@ -807,7 +816,7 @@ print_float_type(h5tools_str_t *buffer, hid_t type, int ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_cmpd_type
*
@ -860,7 +869,7 @@ print_cmpd_type(h5tools_str_t *buffer, hid_t type, int ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_enum_type
*
@ -985,7 +994,7 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_string_type
*
@ -1086,7 +1095,7 @@ print_string_type(h5tools_str_t *buffer, hid_t type, int H5_ATTR_UNUSED ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_reference_type
*
@ -1124,7 +1133,7 @@ print_reference_type(h5tools_str_t *buffer, hid_t type, int H5_ATTR_UNUSED ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_opaque_type
*
@ -1160,7 +1169,7 @@ print_opaque_type(h5tools_str_t *buffer, hid_t type, int ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_vlen_type
*
@ -1190,7 +1199,7 @@ print_vlen_type(h5tools_str_t *buffer, hid_t type, int ind)
return TRUE;
}
/*---------------------------------------------------------------------------
* Purpose: Print information about an array type
*
@ -1237,7 +1246,7 @@ print_array_type(h5tools_str_t *buffer, hid_t type, int ind)
return TRUE;
}
/*-------------------------------------------------------------------------
* Function: print_bitfield_type
*
@ -1345,7 +1354,7 @@ print_type(h5tools_str_t *buffer, hid_t type, int ind)
(unsigned long)H5Tget_size(type), (unsigned)data_class);
}
/*-------------------------------------------------------------------------
* Function: dump_dataset_values
*
@ -1475,7 +1484,7 @@ dump_dataset_values(hid_t dset)
PRINTVALSTREAM(rawoutstream, "\n");
}
/*-------------------------------------------------------------------------
* Function: list_attr
*
@ -1662,7 +1671,7 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain
return 0;
}
/*-------------------------------------------------------------------------
* Function: dataset_list1
*
@ -1727,7 +1736,7 @@ dataset_list1(hid_t dset)
return 0;
}
/*-------------------------------------------------------------------------
* Function: dataset_list2
*
@ -1962,7 +1971,7 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name)
return 0;
} /* end dataset_list2() */
/*-------------------------------------------------------------------------
* Function: datatype_list2
*
@ -2004,7 +2013,7 @@ datatype_list2(hid_t type, const char H5_ATTR_UNUSED *name)
return 0;
}
/*-------------------------------------------------------------------------
* Function: list_obj
*
@ -2160,7 +2169,7 @@ done:
} /* end list_obj() */
/*-------------------------------------------------------------------------
* Function: list_lnk
*
@ -2354,7 +2363,7 @@ done:
return 0;
} /* end list_lnk() */
/*-------------------------------------------------------------------------
* Function: visit_obj
*
@ -2434,7 +2443,7 @@ done:
return retval;
}
/*-------------------------------------------------------------------------
* Function: get_width
*
@ -2550,7 +2559,7 @@ out:
return ret;
}
/*-------------------------------------------------------------------------
* Function: leave
*
@ -2573,7 +2582,7 @@ leave(int ret)
HDexit(ret);
}
/*-------------------------------------------------------------------------
* Function: main
*
@ -2602,6 +2611,26 @@ main(int argc, const char *argv[])
char drivername[50];
const char *preferred_driver = NULL;
int err_exit = 0;
hid_t fapl_id = H5P_DEFAULT;
/* default "anonymous" s3 configuration */
H5FD_ros3_fapl_t ros3_fa = {
1, /* fapl version */
FALSE, /* authenticate */
"", /* aws region */
"", /* access key id */
"", /* secret access key */
};
/* "default" HDFS configuration */
H5FD_hdfs_fapl_t hdfs_fa = {
1, /* fapl version */
"localhost", /* namenode name */
0, /* namenode port */
"", /* kerberos ticket cache */
"", /* user name */
2048, /* stream buffer size */
};
h5tools_setprogname(PROGRAMNAME);
h5tools_setstatus(EXIT_SUCCESS);
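For orientation, here is a minimal sketch of how one of these default structs ends up attached to a file access property list; this is roughly what h5tools_set_configured_fapl does on the tool's behalf later in main(). It assumes the ROS3 VFD is compiled in (H5_HAVE_ROS3_VFD) and uses only the public calls declared in H5FDros3.h; make_ros3_fapl is a hypothetical helper name.
#ifdef H5_HAVE_ROS3_VFD
/* hypothetical helper, not part of this change */
static hid_t
make_ros3_fapl(const H5FD_ros3_fapl_t *fa)
{
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    if (fapl_id < 0)
        return -1;
    /* attach the read-only S3 driver and its configuration */
    if (H5Pset_fapl_ros3(fapl_id, (H5FD_ros3_fapl_t *)fa) < 0) {
        H5Pclose(fapl_id);
        return -1;
    }
    return fapl_id; /* caller closes with H5Pclose() */
}
#endif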
@ -2701,6 +2730,185 @@ main(int argc, const char *argv[])
usage();
leave(EXIT_FAILURE);
}
} else if (!HDstrncmp(argv[argno], "--s3-cred=", (size_t)10)) {
#ifndef H5_HAVE_ROS3_VFD
HDfprintf(rawerrorstream,
"Error: Read-Only S3 VFD is not enabled\n\n");
usage();
leave(EXIT_FAILURE);
#else
unsigned nelems = 0;
char *start = NULL;
char *s3cred_src = NULL;
char **s3cred = NULL;
char const *ccred[3];
/* try to parse s3 credentials tuple
*/
start = HDstrchr(argv[argno], '=');
if (start == NULL) {
HDfprintf(rawerrorstream,
"Error: Unable to parse null credentials tuple\n"
" For anonymous access, omit \"--s3-cred\" and use"
"only \"--vfd=ros3\"\n\n");
usage();
leave(EXIT_FAILURE);
}
start++;
if (FAIL ==
parse_tuple((const char *)start, ',',
&s3cred_src, &nelems, &s3cred))
{
HDfprintf(rawerrorstream,
"Error: Unable to parse S3 credentials\n\n");
usage();
leave(EXIT_FAILURE);
}
/* sanity-check tuple count
*/
if (nelems != 3) {
HDfprintf(rawerrorstream,
"Error: Invalid S3 credentials\n\n");
usage();
leave(EXIT_FAILURE);
}
ccred[0] = (const char *)s3cred[0];
ccred[1] = (const char *)s3cred[1];
ccred[2] = (const char *)s3cred[2];
if (0 == h5tools_populate_ros3_fapl(&ros3_fa, ccred)) {
HDfprintf(rawerrorstream,
"Error: Invalid S3 credentials\n\n");
usage();
leave(EXIT_FAILURE);
}
HDfree(s3cred);
HDfree(s3cred_src);
#endif /* H5_HAVE_ROS3_VFD */
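The branch above relies on parse_tuple() from the tools library (declared in h5tools_utils.h in this change set): on success it allocates one backing string buffer (*cpy_out) and one pointer array (*ptrs_out), both owned and freed by the caller, as the HDfree() pair above shows. A minimal sketch of that contract, with placeholder credential values:
{
    char     *cpy   = NULL; /* backing storage for the element strings  */
    char    **elems = NULL; /* pointers into cpy, one per tuple element */
    unsigned  n     = 0;

    /* region/id/key values below are illustrative placeholders */
    if (FAIL != parse_tuple("(us-east-1,AKIAEXAMPLE,notarealsecret)", ',',
                            &cpy, &n, &elems)) {
        /* n == 3; elems[0] is "us-east-1", and so on */
        HDfree(elems);
        HDfree(cpy);
    }
}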
} else if (!HDstrncmp(argv[argno], "--hdfs-attrs=", (size_t)13)) {
#ifndef H5_HAVE_LIBHDFS
PRINTVALSTREAM(rawoutstream, "The HDFS VFD is not enabled.\n");
leave(EXIT_FAILURE);
#else
/* Parse received configuration data and set fapl config struct
*/
hbool_t _debug = FALSE;
unsigned nelems = 0;
char const *start = NULL;
char *props_src = NULL;
char **props = NULL;
unsigned long k = 0;
/* try to parse tuple
*/
if (_debug) {
HDfprintf(stderr, "configuring hdfs...\n");
}
start = argv[argno]+13; /* should never segfault: worst case is */
if (*start != '(') /* the null terminator after '='. */
{
if (_debug) {
HDfprintf(stderr, " no tuple.\n");
}
usage();
leave(EXIT_FAILURE);
}
if (FAIL ==
parse_tuple((const char *)start, ',',
&props_src, &nelems, &props))
{
HDfprintf(stderr,
" unable to parse tuple.\n");
usage();
leave(EXIT_FAILURE);
}
/* sanity-check tuple count
*/
if (nelems != 5) {
HDfprintf(stderr,
" expected 5-ple, got `%d`\n",
nelems);
usage();
leave(EXIT_FAILURE);
}
if (_debug) {
HDfprintf(stderr,
" got hdfs-attrs tuple: `(%s,%s,%s,%s,%s)`\n",
props[0],
props[1],
props[2],
props[3],
props[4]);
}
/* Populate fapl configuration structure with given properties.
* WARNING: No error-checking is done on length of input strings...
* Silent overflow is possible, albeit unlikely.
*/
if (HDstrncmp(props[0], "", 1)) {
if (_debug) {
HDfprintf(stderr,
" setting namenode name: %s\n",
props[0]);
}
HDstrncpy(hdfs_fa.namenode_name,
(const char *)props[0],
HDstrlen(props[0]));
}
if (HDstrncmp(props[1], "", 1)) {
errno = 0; /* clear any stale value before strtoul */
k = HDstrtoul((const char *)props[1], NULL, 0);
if (errno == ERANGE) {
HDfprintf(stderr,
" port number out of range.\n");
leave(EXIT_FAILURE);
}
if (_debug) {
HDfprintf(stderr,
" setting namenode port: %lu\n",
k);
}
hdfs_fa.namenode_port = (int32_t)k;
}
if (HDstrncmp(props[2], "", 1)) {
if (_debug) {
HDfprintf(stderr,
" setting kerb cache path: %s\n",
props[2]);
}
HDstrncpy(hdfs_fa.kerberos_ticket_cache,
(const char *)props[2],
HDstrlen(props[2]));
}
if (HDstrncmp(props[3], "", 1)) {
if (_debug) {
HDfprintf(stderr,
" setting username: %s\n",
props[3]);
}
HDstrncpy(hdfs_fa.user_name,
(const char *)props[3],
HDstrlen(props[3]));
}
if (HDstrncmp(props[4], "", 1)) {
errno = 0; /* clear any stale value before strtoul */
k = HDstrtoul((const char *)props[4], NULL, 0);
if (errno == ERANGE) {
HDfprintf(stderr,
" buffer size out of range.\n");
leave(EXIT_FAILURE);
}
if (_debug) {
HDfprintf(stderr,
" setting stream buffer size: %lu\n",
k);
}
hdfs_fa.stream_buffer_size = (int32_t)k;
}
HDfree(props);
HDfree(props_src);
#endif /* H5_HAVE_LIBHDFS */
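The WARNING above is accurate: copying HDstrlen(src) bytes neither bounds the write to the destination array nor guarantees a terminator when the field already holds text. A safer pattern, sketched with a hypothetical helper that takes its bound from the struct field itself rather than from any driver-header macro:
/* copy_prop is a hypothetical helper, not part of this change */
static void
copy_prop(char *dst, size_t dst_size, const char *src)
{
    HDstrncpy(dst, src, dst_size - 1); /* bounded copy          */
    dst[dst_size - 1] = '\0';          /* always NUL-terminated */
}

/* e.g. copy_prop(hdfs_fa.namenode_name, sizeof(hdfs_fa.namenode_name), props[0]); */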
} else if('-'!=argv[argno][1]) {
/* Single-letter switches */
for(s = argv[argno] + 1; *s; s++) {
@ -2772,6 +2980,7 @@ main(int argc, const char *argv[])
} /* end switch */
} /* end for */
} else {
HDfprintf(stderr, "Unknown argument: %s\n", argv[argno]);
usage();
leave(EXIT_FAILURE);
}
@ -2791,6 +3000,49 @@ main(int argc, const char *argv[])
leave(EXIT_FAILURE);
}
if (preferred_driver) {
void *conf_fa = NULL;
if (!HDstrcmp(preferred_driver, "ros3")) {
#ifndef H5_HAVE_ROS3_VFD
HDfprintf(rawerrorstream,
"Error: Read-Only S3 VFD not enabled.\n\n");
usage();
leave(EXIT_FAILURE);
#else
conf_fa = (void *)&ros3_fa;
#endif /* H5_HAVE_ROS3_VFD */
} else if (!HDstrcmp(preferred_driver, "hdfs")) {
#ifndef H5_HAVE_LIBHDFS
PRINTVALSTREAM(rawoutstream, "The HDFS VFD is not enabled.\n");
leave(EXIT_FAILURE);
#else
conf_fa = (void *)&hdfs_fa;
#endif /* H5_HAVE_LIBHDFS */
}
if (conf_fa != NULL) {
HDassert(fapl_id == H5P_DEFAULT);
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
if (fapl_id < 0) {
HDfprintf(rawerrorstream,
"Error: Unable to create fapl entry\n\n");
leave(EXIT_FAILURE);
}
if (0 == h5tools_set_configured_fapl(
fapl_id,
preferred_driver,
conf_fa))
{
HDfprintf(rawerrorstream,
"Error: Unable to set fapl\n\n");
usage();
leave(EXIT_FAILURE);
}
}
} /* preferred_driver defined */
/* Turn off HDF5's automatic error printing unless you're debugging h5ls */
if(!show_errors_g)
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
@ -2820,7 +3072,12 @@ main(int argc, const char *argv[])
file = -1;
while(fname && *fname) {
if (fapl_id != H5P_DEFAULT) {
file = H5Fopen(fname, H5F_ACC_RDONLY, fapl_id);
}
else {
file = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, preferred_driver, drivername, sizeof drivername);
}
if(file >= 0) {
if(verbose_g)
@ -2933,6 +3190,14 @@ main(int argc, const char *argv[])
err_exit = 1;
} /* end while */
if (fapl_id != H5P_DEFAULT) {
if (H5Pclose(fapl_id) < 0) {
HDfprintf(rawerrorstream,
"Error: Unable to close fapl entry\n\n");
leave(EXIT_FAILURE);
}
}
if (err_exit)
leave(EXIT_FAILURE);
else

View File

@ -74,7 +74,7 @@ typedef struct iter_t {
ohdr_info_t group_ohdr_info; /* Object header information for groups */
hsize_t max_attrs; /* Maximum attributes from a group */
unsigned long *num_small_attrs; /* Size of small attributes tracked */
unsigned attr_nbins; /* Number of bins for attribute counts */
unsigned long *attr_bins; /* Pointer to array of bins for attribute counts */
@ -118,6 +118,29 @@ typedef struct iter_t {
} iter_t;
static const char *drivername = "";
/* default "anonymous" s3 configuration
*/
static H5FD_ros3_fapl_t ros3_fa = {
1, /* fapl version */
FALSE, /* authenticate */
"", /* aws region */
"", /* access key id */
"", /* secret access key */
};
/* default HDFS access configuration
*/
static H5FD_hdfs_fapl_t hdfs_fa = {
1, /* fapl version */
"localhost", /* namenode name */
0, /* namenode port */
"", /* kerberos ticket cache */
"", /* user name */
2048, /* stream buffer size */
};
static int display_all = TRUE;
/* Enable the printing of selected statistics */
@ -146,7 +169,7 @@ struct handler_t {
char **obj;
};
static const char *s_opts ="Aa:Ddm:EFfhGgl:sSTO:Vw:";
/* e.g. "filemetadata" has to precede "file"; "groupmetadata" has to precede "group" etc. */
static struct long_options l_opts[] = {
{"help", no_arg, 'h'},
@ -246,6 +269,8 @@ static struct long_options l_opts[] = {
{ "summ", no_arg, 'S' },
{ "sum", no_arg, 'S' },
{ "su", no_arg, 'S' },
{ "s3-cred", require_arg, 'w' },
{ "hdfs-attrs", require_arg, 'H' },
{ NULL, 0, '\0' }
};
@ -257,7 +282,7 @@ leave(int ret)
}
/*-------------------------------------------------------------------------
* Function: usage
*
@ -295,9 +320,19 @@ static void usage(const char *prog)
HDfprintf(stdout, " -s, --freespace Print free space information\n");
HDfprintf(stdout, " -S, --summary Print summary of file space information\n");
HDfprintf(stdout, " --enable-error-stack Prints messages from the HDF5 error stack as they occur\n");
HDfprintf(stdout, " --s3-cred=<cred> Access file on S3, using provided credential\n");
HDfprintf(stdout, " <cred> :: (region,id,key)\n");
HDfprintf(stdout, " If <cred> == \"(,,)\", no authentication is used.\n");
HDfprintf(stdout, " --hdfs-attrs=<attrs> Access a file on HDFS with given configuration\n");
HDfprintf(stdout, " attributes.\n");
HDfprintf(stdout, " <attrs> :: (<namenode name>,<namenode port>,\n");
HDfprintf(stdout, " <kerberos cache path>,<username>,\n");
HDfprintf(stdout, " <buffer size>)\n");
HDfprintf(stdout, " If an attribute is empty, a default value will be\n");
HDfprintf(stdout, " used.\n");
}
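Concretely, the new options take quoted tuples on the command line; the values below are illustrative placeholders, not working credentials or a real cluster:
    h5stat --s3-cred="(us-east-1,AKIAEXAMPLE,notarealsecret)" \
           https://mybucket.s3.amazonaws.com/somefile.h5
    h5stat --hdfs-attrs="(localhost,8020,,hadoopuser,2048)" somefile.h5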
/*-------------------------------------------------------------------------
* Function: ceil_log10
*
@ -324,7 +359,7 @@ ceil_log10(unsigned long x)
return ret;
} /* ceil_log10() */
/*-------------------------------------------------------------------------
* Function: attribute_stats
*
@ -374,7 +409,7 @@ attribute_stats(iter_t *iter, const H5O_info_t *oi)
return 0;
} /* end attribute_stats() */
/*-------------------------------------------------------------------------
* Function: group_stats
*
@ -456,7 +491,7 @@ done:
return ret_value;
} /* end group_stats() */
/*-------------------------------------------------------------------------
* Function: dataset_stats
*
@ -647,7 +682,7 @@ done:
return ret_value;
} /* end dataset_stats() */
/*-------------------------------------------------------------------------
* Function: datatype_stats
*
@ -679,7 +714,7 @@ done:
return ret_value;
} /* end datatype_stats() */
/*-------------------------------------------------------------------------
* Function: obj_stats
*
@ -735,7 +770,7 @@ done:
return ret_value;
} /* end obj_stats() */
/*-------------------------------------------------------------------------
* Function: lnk_stats
*
@ -833,7 +868,7 @@ freespace_stats(hid_t fid, iter_t *iter)
return 0;
} /* end freespace_stats() */
/*-------------------------------------------------------------------------
* Function: hand_free
*
@ -862,7 +897,7 @@ hand_free(struct handler_t *hand)
} /* end if */
} /* end hand_free() */
/*-------------------------------------------------------------------------
* Function: parse_command_line
*
@ -1014,6 +1049,119 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret)
} /* end if */
break;
case 'w':
#ifndef H5_HAVE_ROS3_VFD
error_msg("Read-Only S3 VFD not enabled.\n");
goto error;
#else
{
char *cred_str = NULL;
unsigned nelems = 0;
char **cred = NULL;
char const *ccred[3];
if (FAIL == parse_tuple((const char *)opt_arg, ',',
&cred_str, &nelems, &cred)) {
error_msg("Unable to parse s3 credential\n");
goto error;
}
if (nelems != 3) {
error_msg("s3 credential must have three elements\n");
goto error;
}
ccred[0] = (const char *)cred[0];
ccred[1] = (const char *)cred[1];
ccred[2] = (const char *)cred[2];
if (0 ==
h5tools_populate_ros3_fapl(&ros3_fa, ccred))
{
error_msg("Unable to set ros3 fapl config\n");
goto error;
}
HDfree(cred);
HDfree(cred_str);
} /* parse s3-cred block */
drivername = "ros3";
break;
#endif /* H5_HAVE_ROS3_VFD */
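Both tools test `0 == h5tools_populate_ros3_fapl(...)` because the helper (tools library, this change set) returns 1 on success and 0 on failure; per the usage text above, an all-empty tuple "(,,)" leaves the struct in its anonymous, non-authenticating state. A short usage sketch with placeholder values:
{
    H5FD_ros3_fapl_t fa;
    const char *cred[3] = { "us-east-1",     /* region (placeholder) */
                            "AKIAEXAMPLE",   /* access key id        */
                            "notarealsecret" /* secret access key    */ };

    if (0 == h5tools_populate_ros3_fapl(&fa, cred))
        error_msg("unable to populate ros3 fapl config\n");
}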
case 'H':
#ifndef H5_HAVE_LIBHDFS
error_msg("HDFS VFD is not enabled.\n");
goto error;
#else
{
unsigned nelems = 0;
char *props_src = NULL;
char **props = NULL;
unsigned long k = 0;
if (FAIL == parse_tuple(
(const char *)opt_arg,
',',
&props_src,
&nelems,
&props))
{
error_msg("unable to parse hdfs properties tuple\n");
goto error;
}
/* sanity-check tuple count
*/
if (nelems != 5) {
char str[64] = "";
sprintf(str,
"expected 5 elements in hdfs properties tuple "
"but found %u\n",
nelems);
HDfree(props);
HDfree(props_src);
error_msg(str);
goto error;
}
/* Populate fapl configuration structure with given
* properties.
* TODO/WARNING: No error-checking is done on length of
* input strings... Silent overflow is possible,
* albeit unlikely.
*/
if (HDstrncmp(props[0], "", 1)) {
HDstrncpy(hdfs_fa.namenode_name,
(const char *)props[0],
HDstrlen(props[0]));
}
if (HDstrncmp(props[1], "", 1)) {
errno = 0; /* clear any stale value before strtoul */
k = HDstrtoul((const char *)props[1], NULL, 0);
if (errno == ERANGE) {
error_msg("port number out of range\n");
goto error;
}
hdfs_fa.namenode_port = (int32_t)k;
}
if (HDstrncmp(props[2], "", 1)) {
HDstrncpy(hdfs_fa.kerberos_ticket_cache,
(const char *)props[2],
HDstrlen(props[2]));
}
if (HDstrncmp(props[3], "", 1)) {
HDstrncpy(hdfs_fa.user_name,
(const char *)props[3],
HDstrlen(props[3]));
}
if (HDstrncmp(props[4], "", 1)) {
errno = 0; /* clear any stale value before strtoul */
k = HDstrtoul((const char *)props[4], NULL, 0);
if (errno == ERANGE) {
error_msg("buffer size out of range\n");
goto error;
}
hdfs_fa.stream_buffer_size = (int32_t)k;
}
HDfree(props);
HDfree(props_src);
drivername = "hdfs";
}
break;
#endif /* H5_HAVE_LIBHDFS */
default:
usage(h5tools_getprogname());
goto error;
@ -1040,7 +1188,7 @@ error:
return -1;
}
/*-------------------------------------------------------------------------
* Function: iter_free
*
@ -1105,7 +1253,7 @@ iter_free(iter_t *iter)
} /* end if */
} /* end iter_free() */
/*-------------------------------------------------------------------------
* Function: print_file_info
*
@ -1137,7 +1285,7 @@ print_file_info(const iter_t *iter)
return 0;
} /* print_file_info() */
/*-------------------------------------------------------------------------
* Function: print_file_metadata
*
@ -1197,7 +1345,7 @@ print_file_metadata(const iter_t *iter)
return 0;
} /* print_file_metadata() */
/*-------------------------------------------------------------------------
* Function: print_group_info
*
@ -1254,7 +1402,7 @@ print_group_info(const iter_t *iter)
return 0;
} /* print_group_info() */
/*-------------------------------------------------------------------------
* Function: print_group_metadata
*
@ -1281,7 +1429,7 @@ print_group_metadata(const iter_t *iter)
return 0;
} /* print_group_metadata() */
/*-------------------------------------------------------------------------
* Function: print_dataset_info
*
@ -1368,7 +1516,7 @@ print_dataset_info(const iter_t *iter)
return 0;
} /* print_dataset_info() */
/*-------------------------------------------------------------------------
* Function: print_dataset_metadata
*
@ -1397,7 +1545,7 @@ print_dset_metadata(const iter_t *iter)
return 0;
} /* print_dset_metadata() */
/*-------------------------------------------------------------------------
* Function: print_dset_dtype_meta
*
@ -1438,7 +1586,7 @@ print_dset_dtype_meta(const iter_t *iter)
return 0;
} /* print_dset_dtype_meta() */
/*-------------------------------------------------------------------------
* Function: print_attr_info
*
@ -1487,7 +1635,7 @@ print_attr_info(const iter_t *iter)
return 0;
} /* print_attr_info() */
/*-------------------------------------------------------------------------
* Function: print_freespace_info
*
@ -1537,7 +1685,7 @@ print_freespace_info(const iter_t *iter)
return 0;
} /* print_freespace_info() */
/*-------------------------------------------------------------------------
* Function: print_storage_summary
*
@ -1601,7 +1749,7 @@ print_storage_summary(const iter_t *iter)
return 0;
} /* print_storage_summary() */
/*-------------------------------------------------------------------------
* Function: print_file_statistics
*
@ -1648,7 +1796,7 @@ print_file_statistics(const iter_t *iter)
if(display_summary) print_storage_summary(iter);
} /* print_file_statistics() */
/*-------------------------------------------------------------------------
* Function: print_object_statistics
*
@ -1671,7 +1819,7 @@ print_object_statistics(const char *name)
printf("Object name %s\n", name);
} /* print_object_statistics() */
/*-------------------------------------------------------------------------
* Function: print_statistics
*
@ -1697,7 +1845,7 @@ print_statistics(const char *name, const iter_t *iter)
print_file_statistics(iter);
} /* print_statistics() */
/*-------------------------------------------------------------------------
* Function: main
*
@ -1718,6 +1866,7 @@ main(int argc, const char *argv[])
void *edata;
void *tools_edata;
struct handler_t *hand = NULL;
hid_t fapl_id = H5P_DEFAULT;
h5tools_setprogname(PROGRAMNAME);
h5tools_setstatus(EXIT_SUCCESS);
@ -1738,6 +1887,45 @@ main(int argc, const char *argv[])
if(parse_command_line(argc, argv, &hand) < 0)
goto done;
/* if drivername is set (non-empty), we need to set up the fapl */
if (HDstrcmp(drivername, "")) {
void *conf_fa = NULL;
if (!HDstrcmp(drivername, "ros3")) {
#ifndef H5_HAVE_ROS3_VFD
error_msg("Read-Only S3 VFD not enabled.\n\n");
goto done;
#else
conf_fa = (void *)&ros3_fa;
#endif /* H5_HAVE_ROS3_VFD */
} else if (!HDstrcmp(drivername, "hdfs")) {
#ifndef H5_HAVE_LIBHDFS
error_msg("HDFS VFD not enabled.\n\n");
goto done;
#else
conf_fa = (void *)&hdfs_fa;
#endif /* H5_HAVE_LIBHDFS */
}
if (conf_fa != NULL) {
HDassert(fapl_id == H5P_DEFAULT);
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
if (fapl_id < 0) {
error_msg("Unable to create fapl entry\n");
goto done;
}
if (1 > h5tools_set_configured_fapl(
fapl_id,
drivername,
conf_fa))
{
error_msg("Unable to set fapl\n");
goto done;
}
}
} /* drivername set */
fname = argv[opt_ind];
if(enable_error_stack > 0) {
@ -1752,7 +1940,7 @@ main(int argc, const char *argv[])
printf("Filename: %s\n", fname);
fid = H5Fopen(fname, H5F_ACC_RDONLY, fapl_id);
if(fid < 0) {
error_msg("unable to open file \"%s\"\n", fname);
h5tools_setstatus(EXIT_FAILURE);
@ -1833,6 +2021,13 @@ done:
/* Free iter structure */
iter_free(&iter);
if (fapl_id != H5P_DEFAULT) {
if (H5Pclose(fapl_id) < 0) {
error_msg("unable to close fapl entry\n");
h5tools_setstatus(EXIT_FAILURE);
}
}
if(fid >= 0 && H5Fclose(fid) < 0) {
error_msg("unable to close file \"%s\"\n", fname);
h5tools_setstatus(EXIT_FAILURE);

View File

@ -23,3 +23,13 @@ Usage: h5stat [OPTIONS] file
-s, --freespace Print free space information
-S, --summary Print summary of file space information
--enable-error-stack Prints messages from the HDF5 error stack as they occur
--s3-cred=<cred> Access file on S3, using provided credential
<cred> :: (region,id,key)
If <cred> == "(,,)", no authentication is used.
--hdfs-attrs=<attrs> Access a file on HDFS with given configuration
attributes.
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
If an attribute is empty, a default value will be
used.

View File

@ -23,3 +23,13 @@ Usage: h5stat [OPTIONS] file
-s, --freespace Print free space information
-S, --summary Print summary of file space information
--enable-error-stack Prints messages from the HDF5 error stack as they occur
--s3-cred=<cred> Access file on S3, using provided credential
<cred> :: (region,id,key)
If <cred> == "(,,)", no authentication is used.
--hdfs-attrs=<attrs> Access a file on HDFS with given configuration
attributes.
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
If an attribute is empty, a default value will be
used.

View File

@ -23,3 +23,13 @@ Usage: h5stat [OPTIONS] file
-s, --freespace Print free space information
-S, --summary Print summary of file space information
--enable-error-stack Prints messages from the HDF5 error stack as they occur
--s3-cred=<cred> Access file on S3, using provided credential
<cred> :: (region,id,key)
If <cred> == "(,,)", no authentication is used.
--hdfs-attrs=<attrs> Access a file on HDFS with given configuration
attributes.
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
If an attribute is empty, a default value will be
used.

View File

@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
-b B, --binary=B Binary file output, of form B
-O F, --ddl=F Output ddl text into file F
Use blank(empty) filename F to suppress ddl display
--s3-cred=<cred> Supply S3 authentication information to "ros3" vfd.
<cred> :: "(<aws-region>,<access-id>,<access-key>)"
If absent or <cred> -> "(,,)", no authentication.
Has no effect if filedriver is not `ros3'.
--hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
For use with "--filedriver=hdfs"
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
Any absent attribute will use a default value.
--------------- Object Options ---------------
-a P, --attribute=P Print the specified attribute
If an attribute name contains a slash (/), escape the

View File

@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
-V, --version Print version number and exit
--vfd=DRIVER Use the specified virtual file driver
-x, --hexdump Show raw data in hexadecimal format
--s3-cred=C Supply S3 authentication information to "ros3" vfd.
Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
If absent or C->"(,,)", defaults to no-authentication.
Has no effect if vfd flag not set to "ros3".
--hdfs-attrs=A Supply configuration information to Hadoop VFD.
Accepts tuple of (<namenode name>,<namenode port>,
...<kerberos cache path>,<username>,<buffer size>)
If absent or A == '(,,,,)', all default values are used.
Has no effect if vfd flag is not 'hdfs'.
file/OBJECT
Each object consists of an HDF5 file name optionally followed by a

View File

@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
-V, --version Print version number and exit
--vfd=DRIVER Use the specified virtual file driver
-x, --hexdump Show raw data in hexadecimal format
--s3-cred=C Supply S3 authentication information to "ros3" vfd.
Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
If absent or C->"(,,)", defaults to no-authentication.
Has no effect if vfd flag not set to "ros3".
--hdfs-attrs=A Supply configuration information to Hadoop VFD.
Accepts tuple of (<namenode name>,<namenode port>,
...<kerberos cache path>,<username>,<buffer size>)
If absent or A == '(,,,,)', all default values are used.
Has no effect if vfd flag is not 'hdfs'.
file/OBJECT
Each object consists of an HDF5 file name optionally followed by a

View File

@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
-V, --version Print version number and exit
--vfd=DRIVER Use the specified virtual file driver
-x, --hexdump Show raw data in hexadecimal format
--s3-cred=C Supply S3 authentication information to "ros3" vfd.
Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
If absent or C->"(,,)", defaults to no-authentication.
Has no effect if vfd flag not set to "ros3".
--hdfs-attrs=A Supply configuration information to Hadoop VFD.
Accepts tuple of (<namenode name>,<namenode port>,
...<kerberos cache path>,<username>,<buffer size>)
If absent or A == '(,,,,)', all default values are used.
Has no effect if vfd flag is not 'hdfs'.
file/OBJECT
Each object consists of an HDF5 file name optionally followed by a

View File

@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
-b B, --binary=B Binary file output, of form B
-O F, --ddl=F Output ddl text into file F
Use blank(empty) filename F to suppress ddl display
--s3-cred=<cred> Supply S3 authentication information to "ros3" vfd.
<cred> :: "(<aws-region>,<access-id>,<access-key>)"
If absent or <cred> -> "(,,)", no authentication.
Has no effect if filedriver is not `ros3'.
--hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
For use with "--filedriver=hdfs"
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
Any absent attribute will use a default value.
--------------- Object Options ---------------
-a P, --attribute=P Print the specified attribute
If an attribute name contains a slash (/), escape the

View File

@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
-b B, --binary=B Binary file output, of form B
-O F, --ddl=F Output ddl text into file F
Use blank(empty) filename F to suppress ddl display
--s3-cred=<cred> Supply S3 authentication information to "ros3" vfd.
<cred> :: "(<aws-region>,<access-id>,<access-key>)"
If absent or <cred> -> "(,,)", no authentication.
Has no effect if filedriver is not `ros3'.
--hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
For use with "--filedriver=hdfs"
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
Any absent attribute will use a default value.
--------------- Object Options ---------------
-a P, --attribute=P Print the specified attribute
If an attribute name contains a slash (/), escape the

View File

@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
-b B, --binary=B Binary file output, of form B
-O F, --ddl=F Output ddl text into file F
Use blank(empty) filename F to suppress ddl display
--s3-cred=<cred> Supply S3 authentication information to "ros3" vfd.
<cred> :: "(<aws-region>,<access-id>,<access-key>)"
If absent or <cred> -> "(,,)", no authentication.
Has no effect if filedriver is not `ros3'.
--hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
For use with "--filedriver=hdfs"
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
Any absent attribute will use a default value.
--------------- Object Options ---------------
-a P, --attribute=P Print the specified attribute
If an attribute name contains a slash (/), escape the

View File

@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
-b B, --binary=B Binary file output, of form B
-O F, --ddl=F Output ddl text into file F
Use blank(empty) filename F to suppress ddl display
--s3-cred=<cred> Supply S3 authentication information to "ros3" vfd.
<cred> :: "(<aws-region>,<access-id>,<access-key>)"
If absent or <cred> -> "(,,)", no authentication.
Has no effect if filedriver is not `ros3'.
--hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
For use with "--filedriver=hdfs"
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
Any absent attribute will use a default value.
--------------- Object Options ---------------
-a P, --attribute=P Print the specified attribute
If an attribute name contains a slash (/), escape the

View File

@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
-b B, --binary=B Binary file output, of form B
-O F, --ddl=F Output ddl text into file F
Use blank(empty) filename F to suppress ddl display
--s3-cred=<cred> Supply S3 authentication information to "ros3" vfd.
<cred> :: "(<aws-region>,<access-id>,<access-key>)"
If absent or <cred> -> "(,,)", no authentication.
Has no effect if filedriver is not `ros3'.
--hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
For use with "--filedriver=hdfs"
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
Any absent attribute will use a default value.
--------------- Object Options ---------------
-a P, --attribute=P Print the specified attribute
If an attribute name contains a slash (/), escape the

View File

@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
-b B, --binary=B Binary file output, of form B
-O F, --ddl=F Output ddl text into file F
Use blank(empty) filename F to suppress ddl display
--s3-cred=<cred> Supply S3 authentication information to "ros3" vfd.
<cred> :: "(<aws-region>,<access-id>,<access-key>)"
If absent or <cred> -> "(,,)", no authentication.
Has no effect if filedriver is not `ros3'.
--hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
For use with "--filedriver=hdfs"
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
Any absent attribute will use a default value.
--------------- Object Options ---------------
-a P, --attribute=P Print the specified attribute
If an attribute name contains a slash (/), escape the

View File

@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
-b B, --binary=B Binary file output, of form B
-O F, --ddl=F Output ddl text into file F
Use blank(empty) filename F to suppress ddl display
--s3-cred=<cred> Supply S3 authentication information to "ros3" vfd.
<cred> :: "(<aws-region>,<access-id>,<access-key>)"
If absent or <cred> -> "(,,)", no authentication.
Has no effect if filedriver is not `ros3'.
--hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
For use with "--filedriver=hdfs"
<attrs> :: (<namenode name>,<namenode port>,
<kerberos cache path>,<username>,
<buffer size>)
Any absent attribute will use a default value.
--------------- Object Options ---------------
-a P, --attribute=P Print the specified attribute
If an attribute name contains a slash (/), escape the

View File

@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
-V, --version Print version number and exit
--vfd=DRIVER Use the specified virtual file driver
-x, --hexdump Show raw data in hexadecimal format
--s3-cred=C Supply S3 authentication information to "ros3" vfd.
Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
If absent or C->"(,,)", defaults to no-authentication.
Has no effect if vfd flag not set to "ros3".
--hdfs-attrs=A Supply configuration information to Hadoop VFD.
Accepts tuple of (<namenode name>,<namenode port>,
...<kerberos cache path>,<username>,<buffer size>)
If absent or A == '(,,,,)', all default values are used.
Has no effect if vfd flag is not 'hdfs'.
file/OBJECT
Each object consists of an HDF5 file name optionally followed by a

View File

@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
-V, --version Print version number and exit
--vfd=DRIVER Use the specified virtual file driver
-x, --hexdump Show raw data in hexadecimal format
--s3-cred=C Supply S3 authentication information to "ros3" vfd.
Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
If absent or C->"(,,)", defaults to no-authentication.
Has no effect if vfd flag not set to "ros3".
--hdfs-attrs=A Supply configuration information to Hadoop VFD.
Accepts tuple of (<namenode name>,<namenode port>,
...<kerberos cache path>,<username>,<buffer size>)
If absent or A == '(,,,,)', all default values are used.
Has no effect if vfd flag is not 'hdfs'.
file/OBJECT
Each object consists of an HDF5 file name optionally followed by a