Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-04-24 19:40:45 +08:00)

commit 0320f7e3a7 (parent c0f2cb016e)

Added support for fixed sized tensors.
Improved support for tensor expressions.
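The new tests added in this diff (unsupported/test/cxx11_tensor_assign.cpp, cxx11_tensor_expr.cpp and cxx11_tensor_fixed_size.cpp) exercise the two features named in the commit message. The snippet below is a condensed sketch distilled from those tests, not code from the commit itself; it assumes the 2014-era unsupported CXX11 Tensor API and the <Eigen/CXX11/Tensor> header, and may not build unchanged against later Eigen releases. The diff follows.

#include <Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  using Eigen::Tensor;
  using Eigen::TensorFixedSize;
  using Eigen::Sizes;

  // Dynamic-size tensors with the improved expression support:
  // coefficient-wise sum and scaling by a scalar, evaluated on assignment.
  Tensor<float, 3> a(2, 3, 7);
  Tensor<float, 3> b(2, 3, 7);
  a.setZero();
  b.setZero();
  Tensor<float, 3> c(2, 3, 7);
  c = a + b;        // TensorCwiseBinaryOp with scalar_sum_op
  c = a * 3.14f;    // TensorCwiseUnaryOp with scalar_multiple_op

  // Fixed-size tensor: the dimensions are encoded in the type (Sizes<2, 3, 7>),
  // so the storage is a plain in-object array and no heap allocation is needed.
  TensorFixedSize<float, Sizes<2, 3, 7> > f;
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 3; ++j)
      for (int k = 0; k < 7; ++k)
        f(i, j, k) = float(i + j + k);

  TensorFixedSize<float, Sizes<2, 3, 7> > g;
  g = f.cwiseSqrt();  // expressions work on fixed-size tensors as well

  std::cout << c(1, 2, 3) << " " << g(1, 2, 3) << "\n";
  return 0;
}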
@@ -31,6 +31,7 @@
#include "Eigen/Core"

#include "unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h"

#include "unsupported/Eigen/CXX11/src/Tensor/TensorBase.h"
@@ -41,6 +42,7 @@

#include "unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h"
#include "unsupported/Eigen/CXX11/src/Tensor/Tensor.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorMap.h"

#include "Eigen/src/Core/util/ReenableStupidWarnings.h"
@@ -112,7 +112,7 @@ template<typename a, typename... as> struct get<0, type_lis
template<int n EIGEN_TPL_PP_SPEC_HACK_DEFC(typename, as)> struct get<n, type_list<EIGEN_TPL_PP_SPEC_HACK_USE(as)>> { static_assert((n - n) < 0, "meta-template get: The element to extract from a list must be smaller than the size of the list."); };

template<typename T, int n, T a, T... as> struct get<n, numeric_list<T, a, as...>> : get<n-1, numeric_list<T, as...>> {};
template<typename T, T a, T... as> struct get<0, numeric_list<T, a, as...>> { constexpr static int value = a; };
template<typename T, T a, T... as> struct get<0, numeric_list<T, a, as...>> { constexpr static T value = a; };
template<typename T, int n EIGEN_TPL_PP_SPEC_HACK_DEFC(T, as)> struct get<n, numeric_list<T EIGEN_TPL_PP_SPEC_HACK_USEC(as)>> { static_assert((n - n) < 0, "meta-template get: The element to extract from a list must be smaller than the size of the list."); };

/* always get type, regardless of dummy; good for parameter pack expansion */
@@ -17,9 +17,6 @@
#error Intel Compiler only supports required C++ features since version 13.1.
// note that most stuff in principle works with 13.0 but when combining
// some features, at some point 13.0 will just fail with an internal assertion
#elif defined(__clang__) && (__clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 1))
// note that it _should_ work with 3.1 but it was only tested with 3.2
#error Clang C++ Compiler (clang++) only supports required C++ features since version 3.1.
#elif defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 6))
// G++ < 4.6 by default will continue processing the source files - even if we use #error to make
// it error out. For this reason, we use the pragma to make sure G++ aborts at the first error
@@ -40,17 +37,10 @@
#error This library needs at least a C++11 compliant compiler. If you use g++/clang, please enable the -std=c++11 compiler flag. (-std=c++0x on older versions.)
#endif

using std::array;

namespace Eigen {

// Use std::array as Eigen array
/*template <typename T, size_t N>
struct array : public std::array<T, N> {
array() = default;
array(const std::initializer_list<T>& a);// : std::array<T, N>(a) {};
array(const std::array<T, N>& a);
};*/
template <typename T, std::size_t N> using array = std::array<T, N>;

namespace internal {
@@ -11,16 +11,63 @@
#define EIGEN_EMULATE_CXX11_META_H

namespace Eigen {

// The array class is only available starting with cxx11. Emulate our own here
// if needed
template <typename T, size_t n> class array {
 public:
T& operator[] (size_t index) { return values[index]; }
const T& operator[] (size_t index) const { return values[index]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T& operator[] (size_t index) { return values[index]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const T& operator[] (size_t index) const { return values[index]; }

T values[n];

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array() { }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v) {
EIGEN_STATIC_ASSERT(n==1, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2) {
EIGEN_STATIC_ASSERT(n==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3) {
EIGEN_STATIC_ASSERT(n==3, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
values[2] = v3;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4) {
EIGEN_STATIC_ASSERT(n==4, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
values[2] = v3;
values[3] = v4;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4, const T& v5) {
EIGEN_STATIC_ASSERT(n==5, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
values[2] = v3;
values[3] = v4;
values[4] = v5;
}

#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
array(std::initializer_list<T> l) {
std::copy(l.begin(), l.end(), values);
}
#endif
};

@@ -35,8 +82,10 @@ namespace internal {
struct empty_list { static const std::size_t count = 0; };

template<typename T, typename Tail=empty_list> struct type_list {
T head;
Tail tail;
typedef T HeadType;
typedef Tail TailType;
static const T head;
static const Tail tail;
static const std::size_t count = 1 + Tail::count;
};

@@ -54,9 +103,25 @@ template<> struct make_type_list<> {
};

template <std::size_t index, class TList> struct get_type;

template <class Head, class Tail>
struct get_type<0, type_list<Head, Tail> >
{
typedef Head type;
};

template <std::size_t i, class Head, class Tail>
struct get_type<i, type_list<Head, Tail> >
{
typedef typename get_type<i-1, Tail>::type type;
};

/* numeric list */
template <typename T, T n>
struct type2val {
typedef T type;
static const T value = n;
};

@@ -84,6 +149,28 @@ template<typename T, T V> struct gen_numeric_list_repeated<T, 5, V> {
};

template <std::size_t index, class NList> struct get;

template <class Head, class Tail>
struct get<0, type_list<Head, Tail> >
{
typedef typename Head::type type;
static const type value = Head::value;
};

template <std::size_t i, class Head, class Tail>
struct get<i, type_list<Head, Tail> >
{
typedef typename get<i-1, Tail>::type type;
static const type value = get<i-1, Tail>::value;
};

template <class NList> struct arg_prod {
static const typename NList::HeadType::type value = get<0, NList>::value * arg_prod<typename NList::TailType>::value;
};
template <> struct arg_prod<empty_list> {
static const int value = 1;
};

template<int n, typename t>
array<t, n> repeat(t v) {
@@ -60,26 +60,6 @@ namespace Eigen {

namespace internal {

template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
struct tensor_index_linearization_helper
{
static inline Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const& dimensions)
{
return array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
}
};

template<typename Index, std::size_t NumIndices, bool RowMajor>
struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
static inline Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const&)
{
return array_get<RowMajor ? 0 : NumIndices - 1>(indices);
}
};

/* Forward-declaration required for the symmetry support. */
template<typename Tensor_, typename Symmetry_, int Flags = 0> class tensor_symmetry_value_setter;

@@ -102,13 +82,15 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_> >
static const int Options = Options_;
static const std::size_t NumIndices = NumIndices_;

typedef DSizes<DenseIndex, NumIndices_> Dimensions;

protected:
TensorStorage<Scalar, NumIndices, Dynamic, Options> m_storage;

public:
EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
EIGEN_STRONG_INLINE array<Index, NumIndices> dimensions() const { return m_storage.dimensions(); }
EIGEN_STRONG_INLINE Index size() const { return internal::array_prod(m_storage.dimensions()); }
EIGEN_STRONG_INLINE const DSizes<DenseIndex, NumIndices_>& dimensions() const { return m_storage.dimensions(); }
EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }

@@ -232,13 +214,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_> >
{
}

#ifdef EIGEN_HAVE_RVALUE_REFERENCES
// inline Tensor(Self&& other)
// : m_storage(other.m_storage)
// {
// }
#endif

#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Tensor(Index firstDimension, IndexTypes... otherDimensions)
@@ -327,7 +302,11 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_> >

inline Index linearizedIndex(const array<Index, NumIndices>& indices) const
{
return internal::tensor_index_linearization_helper<Index, NumIndices, NumIndices - 1, Options&RowMajor>::run(indices, m_storage.dimensions());
if (Options&RowMajor) {
return m_storage.dimensions().IndexOfRowMajor(indices);
} else {
return m_storage.dimensions().IndexOfColMajor(indices);
}
}
};
@@ -62,6 +62,20 @@ class TensorBase
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived>
cwiseAbs() const { return derived(); }

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_pow_op<Scalar>, const Derived>
cwisePow(Scalar exponent) const {
return TensorCwiseUnaryOp<internal::scalar_pow_op<Scalar>, const Derived>
(derived(), internal::scalar_pow_op<Scalar>(exponent));
}

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const Derived>
operator * (Scalar scale) const {
return TensorCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const Derived>
(derived(), internal::scalar_multiple_op<Scalar>(scale));
}

// Coefficient-wise binary operators.
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const OtherDerived>
unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h (new file, 212 lines)
@@ -0,0 +1,212 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
|
||||
#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
|
||||
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \internal
|
||||
*
|
||||
* \class TensorDimensions
|
||||
* \ingroup CXX11_Tensor_Module
|
||||
*
|
||||
* \brief Set of classes used to encode and store the dimensions of a Tensor.
|
||||
*
|
||||
* The Sizes class encodes as part of the type the number of dimensions and the
|
||||
* sizes corresponding to each dimension. It uses no storage space since it is
|
||||
* entirely known at compile time.
|
||||
* The DSizes class is its dynamic sibling: the number of dimensions is known
|
||||
* at compile time but the sizes are set during execution.
|
||||
*
|
||||
* \sa Tensor
|
||||
*/
|
||||
|
||||
|
||||
|
||||
// Boiler plate code
|
||||
namespace internal {
|
||||
|
||||
template<std::size_t n, typename Dimension> struct dget {
|
||||
static const std::size_t value = internal::get<n, typename Dimension::Base>::value;
|
||||
};
|
||||
|
||||
|
||||
template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
|
||||
struct fixed_size_tensor_index_linearization_helper
|
||||
{
|
||||
template <typename Dimensions>
|
||||
static inline Index run(array<Index, NumIndices> const& indices,
|
||||
const Dimensions& dimensions)
|
||||
{
|
||||
return array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
|
||||
dget<RowMajor ? n : (NumIndices - n - 1), Dimensions>::value *
|
||||
fixed_size_tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Index, std::size_t NumIndices, bool RowMajor>
|
||||
struct fixed_size_tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
|
||||
{
|
||||
template <typename Dimensions>
|
||||
static inline Index run(array<Index, NumIndices> const& indices,
|
||||
const Dimensions&)
|
||||
{
|
||||
return array_get<RowMajor ? 0 : NumIndices - 1>(indices);
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
|
||||
// Fixed size
|
||||
#ifndef EIGEN_EMULATE_CXX11_META_H
|
||||
template <typename std::size_t... Indices>
|
||||
struct Sizes : internal::numeric_list<std::size_t, Indices...> {
|
||||
typedef internal::numeric_list<std::size_t, Indices...> Base;
|
||||
static const std::size_t total_size = internal::arg_prod(Indices...);
|
||||
|
||||
static std::size_t TotalSize() {
|
||||
return internal::arg_prod(Indices...);
|
||||
}
|
||||
|
||||
Sizes() { }
|
||||
template <typename DenseIndex>
|
||||
explicit Sizes(const array<DenseIndex, Base::count>&/* indices*/) {
|
||||
// todo: add assertion
|
||||
}
|
||||
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
|
||||
explicit Sizes(std::initializer_list<std::size_t>/* l*/) {
|
||||
// todo: add assertion
|
||||
}
|
||||
#endif
|
||||
|
||||
template <typename T> Sizes& operator = (const T&/* other*/) {
|
||||
// add assertion failure if the size of other is different
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename DenseIndex>
|
||||
size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
|
||||
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count - 1, false>::run(indices, *static_cast<const Base*>(this));
|
||||
}
|
||||
template <typename DenseIndex>
|
||||
size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
|
||||
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count - 1, true>::run(indices, *static_cast<const Base*>(this));
|
||||
}
|
||||
};
|
||||
|
||||
#else
|
||||
|
||||
template <std::size_t n>
|
||||
struct non_zero_size {
|
||||
typedef internal::type2val<std::size_t, n> type;
|
||||
};
|
||||
template <>
|
||||
struct non_zero_size<0> {
|
||||
typedef internal::null_type type;
|
||||
};
|
||||
|
||||
template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0, std::size_t V5=0> struct Sizes {
|
||||
typedef typename internal::make_type_list<typename non_zero_size<V1>::type, typename non_zero_size<V2>::type, typename non_zero_size<V3>::type, typename non_zero_size<V4>::type, typename non_zero_size<V5>::type >::type Base;
|
||||
static const size_t count = Base::count;
|
||||
static const std::size_t total_size = internal::arg_prod<Base>::value;
|
||||
|
||||
static const size_t TotalSize() {
|
||||
return internal::arg_prod<Base>::value;
|
||||
}
|
||||
|
||||
Sizes() { }
|
||||
template <typename DenseIndex>
|
||||
explicit Sizes(const array<DenseIndex, Base::count>& indices) {
|
||||
// todo: add assertion
|
||||
}
|
||||
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
|
||||
explicit Sizes(std::initializer_list<std::size_t> l) {
|
||||
// todo: add assertion
|
||||
}
|
||||
#endif
|
||||
|
||||
template <typename T> Sizes& operator = (const T& other) {
|
||||
// to do: check the size of other
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename DenseIndex>
|
||||
size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
|
||||
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count - 1, false>::run(indices, *this);
|
||||
}
|
||||
template <typename DenseIndex>
|
||||
size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
|
||||
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count - 1, true>::run(indices, *this);
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
// Boiler plate
|
||||
namespace internal {
|
||||
template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
|
||||
struct tensor_index_linearization_helper
|
||||
{
|
||||
static inline Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const& dimensions)
|
||||
{
|
||||
return array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
|
||||
array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
|
||||
tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Index, std::size_t NumIndices, bool RowMajor>
|
||||
struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
|
||||
{
|
||||
static inline Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const&)
|
||||
{
|
||||
return array_get<RowMajor ? 0 : NumIndices - 1>(indices);
|
||||
}
|
||||
};
|
||||
} // end namespace internal
|
||||
|
||||
|
||||
|
||||
// Dynamic size
|
||||
template <typename DenseIndex, std::size_t NumDims>
|
||||
struct DSizes : array<DenseIndex, NumDims> {
|
||||
typedef array<DenseIndex, NumDims> Base;
|
||||
|
||||
size_t TotalSize() const {
|
||||
return internal::array_prod(*static_cast<const Base*>(this));
|
||||
}
|
||||
|
||||
DSizes() { }
|
||||
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
|
||||
// explicit DSizes(std::initializer_list<DenseIndex> l) : Base(l) { }
|
||||
#endif
|
||||
explicit DSizes(const array<DenseIndex, NumDims>& a) : Base(a) { }
|
||||
|
||||
DSizes& operator = (const array<DenseIndex, NumDims>& other) {
|
||||
*static_cast<Base*>(this) = other;
|
||||
return *this;
|
||||
}
|
||||
|
||||
// A constexpr would be so much better here
|
||||
size_t IndexOfColMajor(const array<DenseIndex, NumDims>& indices) const {
|
||||
return internal::tensor_index_linearization_helper<DenseIndex, NumDims, NumDims - 1, false>::run(indices, *static_cast<const Base*>(this));
|
||||
}
|
||||
size_t IndexOfRowMajor(const array<DenseIndex, NumDims>& indices) const {
|
||||
return internal::tensor_index_linearization_helper<DenseIndex, NumDims, NumDims - 1, true>::run(indices, *static_cast<const Base*>(this));
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
|
@@ -24,15 +24,12 @@ namespace Eigen {
* TODO: add support for vectorization
*/

template<typename Derived>
struct TensorEvaluator
{
typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
typedef typename Derived::Scalar& CoeffReturnType;
//typedef typename Derived::PacketScalar PacketScalar;
typedef TensorEvaluator<Derived> nestedType;

TensorEvaluator(Derived& m)
: m_data(const_cast<Scalar*>(m.data()))
@@ -72,7 +69,6 @@ template<typename UnaryOp, typename ArgType>
struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType> >
{
typedef TensorCwiseUnaryOp<UnaryOp, ArgType> XprType;
typedef TensorEvaluator<ArgType> nestedType;

TensorEvaluator(const XprType& op)
: m_functor(op.functor()),
@@ -89,7 +85,7 @@ struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType> >

private:
const UnaryOp m_functor;
typename TensorEvaluator<ArgType>::nestedType m_argImpl;
TensorEvaluator<ArgType> m_argImpl;
};

@@ -99,8 +95,6 @@ template<typename BinaryOp, typename LeftArgType, typename RightArgType>
struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType> >
{
typedef TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType> XprType;
typedef TensorEvaluator<LeftArgType> leftType;
typedef TensorEvaluator<RightArgType> rightType;

TensorEvaluator(const XprType& op)
: m_functor(op.functor()),
@@ -118,8 +112,8 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg

private:
const BinaryOp m_functor;
typename TensorEvaluator<LeftArgType>::nestedType m_leftImpl;
typename TensorEvaluator<RightArgType>::nestedType m_rightImpl;
TensorEvaluator<LeftArgType> m_leftImpl;
TensorEvaluator<RightArgType> m_rightImpl;
};

} // end namespace Eigen
@@ -54,7 +54,7 @@ struct nested<TensorCwiseUnaryOp<UnaryOp, XprType>, 1, typename eval<TensorCwise

template<typename UnaryOp, typename XprType>
class TensorCwiseUnaryOp
class TensorCwiseUnaryOp : public TensorBase<TensorCwiseUnaryOp<UnaryOp, XprType> >
{
public:
typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::Scalar Scalar;
@@ -75,11 +75,6 @@ class TensorCwiseUnaryOp
const typename internal::remove_all<typename XprType::Nested>::type&
nestedExpression() const { return m_xpr; }

/** \returns the nested expression */
EIGEN_DEVICE_FUNC
typename internal::remove_all<typename XprType::Nested>::type&
nestedExpression() { return m_xpr.const_cast_derived(); }

protected:
typename XprType::Nested m_xpr;
const UnaryOp m_functor;
@@ -124,7 +119,7 @@ struct nested<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>, 1, typename

template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
class TensorCwiseBinaryOp
class TensorCwiseBinaryOp : public TensorBase<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >
{
public:
typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::Scalar Scalar;
unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h (new file, 232 lines)
@@ -0,0 +1,232 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
|
||||
#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class TensorFixedSize
|
||||
* \ingroup CXX11_Tensor_Module
|
||||
*
|
||||
* \brief The fixed sized version of the tensor class.
|
||||
*
|
||||
* The fixed-size equivalent of
* Eigen::Tensor<float, 3> t(3, 5, 7);
* is
* Eigen::TensorFixedSize<float, Sizes<3,5,7>> t;
|
||||
*/
|
||||
|
||||
template<typename Scalar_, typename Dimensions_, int Options_>
|
||||
class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_> >
|
||||
{
|
||||
public:
|
||||
typedef TensorFixedSize<Scalar_, Dimensions_, Options_> Self;
|
||||
typedef TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_> > Base;
|
||||
typedef typename Eigen::internal::nested<Self>::type Nested;
|
||||
typedef typename internal::traits<Self>::StorageKind StorageKind;
|
||||
typedef typename internal::traits<Self>::Index Index;
|
||||
typedef Scalar_ Scalar;
|
||||
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef typename Base::CoeffReturnType CoeffReturnType;
|
||||
|
||||
static const int Options = Options_;
|
||||
typedef Dimensions_ Dimensions;
|
||||
static const std::size_t NumIndices = Dimensions::count;
|
||||
|
||||
protected:
|
||||
TensorStorage<Scalar, NumIndices, Dimensions::total_size, Options, Dimensions> m_storage;
|
||||
|
||||
public:
|
||||
EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
|
||||
EIGEN_STRONG_INLINE array<Index, NumIndices> dimensions() const { return m_storage.dimensions(); }
|
||||
EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
|
||||
EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
|
||||
EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
|
||||
|
||||
// This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
|
||||
// work, because that uses base().coeffRef() - and we don't yet
|
||||
// implement a similar class hierarchy
|
||||
inline Self& base() { return *this; }
|
||||
inline const Self& base() const { return *this; }
|
||||
|
||||
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
|
||||
template<typename... IndexTypes>
|
||||
inline const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
|
||||
{
|
||||
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
|
||||
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
|
||||
}
|
||||
#endif
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
|
||||
{
|
||||
eigen_internal_assert(checkIndexRange(indices));
|
||||
return m_storage.data()[linearizedIndex(indices)];
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
|
||||
{
|
||||
eigen_internal_assert(index >= 0 && index < size());
|
||||
return m_storage.data()[index];
|
||||
}
|
||||
|
||||
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
|
||||
template<typename... IndexTypes>
|
||||
inline Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
|
||||
{
|
||||
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
|
||||
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
|
||||
}
|
||||
#endif
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
|
||||
{
|
||||
eigen_internal_assert(checkIndexRange(indices));
|
||||
return m_storage.data()[linearizedIndex(indices)];
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
|
||||
{
|
||||
eigen_internal_assert(index >= 0 && index < size());
|
||||
return m_storage.data()[index];
|
||||
}
|
||||
|
||||
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
|
||||
template<typename... IndexTypes>
|
||||
inline const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
|
||||
{
|
||||
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
|
||||
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
|
||||
}
|
||||
#endif
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
|
||||
{
|
||||
eigen_assert(checkIndexRange(indices));
|
||||
return coeff(indices);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
|
||||
{
|
||||
eigen_internal_assert(index >= 0 && index < size());
|
||||
return coeff(index);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
|
||||
{
|
||||
// The bracket operator is only for vectors, use the parenthesis operator instead.
|
||||
EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
|
||||
return coeff(index);
|
||||
}
|
||||
|
||||
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
|
||||
template<typename... IndexTypes>
|
||||
inline Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
|
||||
{
|
||||
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
|
||||
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
|
||||
}
|
||||
#endif
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
|
||||
{
|
||||
eigen_assert(checkIndexRange(indices));
|
||||
return coeffRef(indices);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE Scalar& operator()(Index index)
|
||||
{
|
||||
eigen_assert(index >= 0 && index < size());
|
||||
return coeffRef(index);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE Scalar& operator[](Index index)
|
||||
{
|
||||
// The bracket operator is only for vectors, use the parenthesis operator instead
|
||||
EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
return coeffRef(index);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorFixedSize()
|
||||
: m_storage()
|
||||
{
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorFixedSize(const Self& other)
|
||||
: m_storage(other.m_storage)
|
||||
{
|
||||
}
|
||||
|
||||
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
|
||||
inline TensorFixedSize(Self&& other)
|
||||
: m_storage(other.m_storage)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
template<typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorFixedSize& operator=(const OtherDerived& other)
|
||||
{
|
||||
// FIXME: check that the dimensions of other match the dimensions of *this.
|
||||
// Unfortunately this isn't possible yet when the rhs is an expression.
|
||||
internal::TensorAssign<TensorFixedSize, const OtherDerived>::run(*this, other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
protected:
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& /*indices*/) const
|
||||
{
|
||||
using internal::array_apply_and_reduce;
|
||||
using internal::array_zip_and_reduce;
|
||||
using internal::greater_equal_zero_op;
|
||||
using internal::logical_and_op;
|
||||
using internal::lesser_op;
|
||||
|
||||
return true;
|
||||
// check whether the indices are all >= 0
|
||||
/* array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
|
||||
// check whether the indices fit in the dimensions
|
||||
array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());*/
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
|
||||
{
|
||||
if (Options&RowMajor) {
|
||||
return m_storage.dimensions().IndexOfRowMajor(indices);
|
||||
} else {
|
||||
return m_storage.dimensions().IndexOfColMajor(indices);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
|
@@ -13,6 +13,7 @@
namespace Eigen {

template<typename Scalar_, std::size_t NumIndices_, int Options_ = 0> class Tensor;
template<typename Scalar_, typename Dimensions, int Options_ = 0> class TensorFixedSize;
template<typename PlainObjectType> class TensorMap;
template<typename Derived> class TensorBase;
@@ -43,24 +43,38 @@ template<typename PlainObjectType> class TensorMap : public TensorBase<TensorMap
typedef Scalar* PointerType;
typedef PointerType PointerArgType;

// Fixed size plain object type only
/* EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr) : m_data(dataPtr) {
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
//EIGEN_STATIC_ASSERT(1 == PlainObjectType::NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
// todo: add assert to ensure we don't screw up here.
}*/

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions({{firstDimension}}) {
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions(array<DenseIndex, PlainObjectType::NumIndices>({{firstDimension}})) {
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(1 == PlainObjectType::NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}

#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions({{firstDimension, otherDimensions...}}) {
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions(array<DenseIndex, PlainObjectType::NumIndices>({{firstDimension, otherDimensions...}})) {
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == PlainObjectType::NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#endif

inline TensorMap(PointerArgType dataPtr, const array<Index, PlainObjectType::NumIndices>& dimensions)
: m_data(dataPtr), m_dimensions(dimensions)
{ }

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_dimensions[n]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index size() const { return internal::array_prod(m_dimensions); }
EIGEN_STRONG_INLINE const typename PlainObjectType::Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar* data() { return m_data; }
EIGEN_DEVICE_FUNC
@@ -78,8 +92,13 @@ template<typename PlainObjectType> class TensorMap : public TensorBase<TensorMap
EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
{
static_assert(sizeof...(otherIndices) + 1 == PlainObjectType::NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
const Index index = internal::tensor_index_linearization_helper<Index, PlainObjectType::NumIndices, PlainObjectType::NumIndices - 1, PlainObjectType::Options&RowMajor>::run(array<Index, PlainObjectType::NumIndices>{{firstIndex, otherIndices...}}, m_dimensions);
return m_data[index];
if (PlainObjectType::Options&RowMajor) {
const Index index = m_dimensions.IndexOfRowMajor(array<Index, PlainObjectType::NumIndices>{{firstIndex, otherIndices...}});
return m_data[index];
} else {
const Index index = m_dimensions.IndexOfColMajor(array<Index, PlainObjectType::NumIndices>{{firstIndex, otherIndices...}});
return m_data[index];
}
}
#endif

@@ -93,7 +112,7 @@ template<typename PlainObjectType> class TensorMap : public TensorBase<TensorMap

private:
typename PlainObjectType::Scalar* m_data;
array<DenseIndex, PlainObjectType::NumIndices> m_dimensions;
typename PlainObjectType::Dimensions m_dimensions;
};

} // end namespace Eigen
@@ -32,6 +32,35 @@ namespace Eigen {
*/
template<typename T, std::size_t NumIndices_, DenseIndex Size, int Options_, typename Dimensions = void> class TensorStorage;

// Pure fixed-size storage
template<typename T, std::size_t NumIndices_, DenseIndex Size, int Options_, typename FixedDimensions>
class TensorStorage
{
private:
T m_data[Size];
FixedDimensions m_dimensions;

public:
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorStorage() {
EIGEN_STATIC_ASSERT(Size == FixedDimensions::total_size, YOU_MADE_A_PROGRAMMING_MISTAKE)
}

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T *data() { return m_data; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const T *data() const { return m_data; }

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const FixedDimensions dimensions() const { return m_dimensions; }

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE DenseIndex size() const { return m_dimensions.TotalSize(); }
};

// pure-dynamic, but without specification of all dimensions explicitly
template<typename T, std::size_t NumIndices_, int Options_>
class TensorStorage<T, NumIndices_, Dynamic, Options_, void>
@@ -44,7 +73,7 @@ class TensorStorage<T, NumIndices_, Dynamic, Options_, void>
TensorStorage(const TensorStorage<T, NumIndices_, Dynamic, Options_, void>& other) : Base_(other) { }

#ifdef EIGEN_HAVE_RVALUE_REFERENCES
// TensorStorage(TensorStorage<T, NumIndices_, Dynamic, Options_, void>&&) = default;
// TensorStorage(TensorStorage<T, NumIndices_, Dynamic, Options_, void>&&) = default;
#endif
TensorStorage(internal::constructor_without_unaligned_array_assert) : Base_(internal::constructor_without_unaligned_array_assert()) {}
TensorStorage(DenseIndex size, const array<DenseIndex, NumIndices_>& dimensions) : Base_(size, dimensions) {}
@@ -57,11 +86,11 @@ template<typename T, std::size_t NumIndices_, int Options_>
class TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type>
{
T *m_data;
array<DenseIndex, NumIndices_> m_dimensions;
DSizes<DenseIndex, NumIndices_> m_dimensions;

typedef TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type> Self_;
public:
TensorStorage() : m_data(0), m_dimensions() {}
TensorStorage() : m_data(0), m_dimensions() {}
TensorStorage(internal::constructor_without_unaligned_array_assert)
: m_data(0), m_dimensions(internal::template repeat<NumIndices_, DenseIndex>(0)) {}
TensorStorage(DenseIndex size, const array<DenseIndex, NumIndices_>& dimensions)
@@ -83,25 +112,25 @@ class TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_nu
}

#ifdef EIGEN_HAVE_RVALUE_REFERENCES
/* TensorStorage(Self_&& other)
/* TensorStorage(Self_&& other)
: m_data(std::move(other.m_data)), m_dimensions(std::move(other.m_dimensions))
{
other.m_data = nullptr;
}
*/

Self_& operator=(Self_&& other)
{
using std::swap;
swap(m_data, other.m_data);
swap(m_dimensions, other.m_dimensions);
return *this;
}
}*/
#endif

~TensorStorage() { internal::conditional_aligned_delete_auto<T,(Options_&DontAlign)==0>(m_data, internal::array_prod(m_dimensions)); }
void swap(Self_& other)
{ std::swap(m_data,other.m_data); std::swap(m_dimensions,other.m_dimensions); }
const array<DenseIndex, NumIndices_>& dimensions() const {return m_dimensions;}
const DSizes<DenseIndex, NumIndices_>& dimensions() const {return m_dimensions;}

void conservativeResize(DenseIndex size, const array<DenseIndex, NumIndices_>& nbDimensions)
{
@@ -124,9 +153,10 @@ class TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_nu

T *data() { return m_data; }
const T *data() const { return m_data; }

DenseIndex size() const { return m_dimensions.TotalSize(); }
};

// TODO: implement fixed-size stuff

} // end namespace Eigen
@@ -57,6 +57,15 @@ struct traits<Tensor<Scalar_, NumIndices_, Options_> >
};

template<typename Scalar_, typename Dimensions, int Options_>
struct traits<TensorFixedSize<Scalar_, Dimensions, Options_> >
{
typedef Scalar_ Scalar;
typedef Dense StorageKind;
typedef DenseIndex Index;
};

template<typename PlainObjectType>
struct traits<TensorMap<PlainObjectType> >
: public traits<PlainObjectType>
@@ -68,16 +77,28 @@ struct traits<TensorMap<PlainObjectType> >
};

template<typename _Scalar, std::size_t NumIndices_, int Options_>
struct eval<Tensor<_Scalar, NumIndices_, Options_>, Eigen::Dense>
template<typename _Scalar, std::size_t NumIndices_, int Options>
struct eval<Tensor<_Scalar, NumIndices_, Options>, Eigen::Dense>
{
typedef const Tensor<_Scalar, NumIndices_, Options_>& type;
typedef const Tensor<_Scalar, NumIndices_, Options>& type;
};

template<typename _Scalar, std::size_t NumIndices_, int Options_>
struct eval<const Tensor<_Scalar, NumIndices_, Options_>, Eigen::Dense>
template<typename _Scalar, std::size_t NumIndices_, int Options>
struct eval<const Tensor<_Scalar, NumIndices_, Options>, Eigen::Dense>
{
typedef const Tensor<_Scalar, NumIndices_, Options_>& type;
typedef const Tensor<_Scalar, NumIndices_, Options>& type;
};

template<typename Scalar_, typename Dimensions, int Options>
struct eval<TensorFixedSize<Scalar_, Dimensions, Options>, Eigen::Dense>
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options>& type;
};

template<typename Scalar_, typename Dimensions, int Options>
struct eval<const TensorFixedSize<Scalar_, Dimensions, Options>, Eigen::Dense>
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options>& type;
};

template<typename PlainObjectType>
@@ -104,6 +125,18 @@ struct nested<const Tensor<Scalar_, NumIndices_, Options_>, 1, typename eval<con
typedef const Tensor<Scalar_, NumIndices_, Options_>& type;
};

template <typename Scalar_, typename Dimensions, int Options>
struct nested<TensorFixedSize<Scalar_, Dimensions, Options>, 1, typename eval<TensorFixedSize<Scalar_, Dimensions, Options> >::type>
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options>& type;
};

template <typename Scalar_, typename Dimensions, int Options>
struct nested<const TensorFixedSize<Scalar_, Dimensions, Options>, 1, typename eval<const TensorFixedSize<Scalar_, Dimensions, Options> >::type>
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options>& type;
};

template <typename PlainObjectType>
struct nested<TensorMap<PlainObjectType>, 1, typename eval<TensorMap<PlainObjectType> >::type>
{
unsupported/test/cxx11_tensor_assign.cpp (new file, 195 lines)
@@ -0,0 +1,195 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#include "main.h"
|
||||
|
||||
#include <Eigen/CXX11/Tensor>
|
||||
|
||||
using Eigen::Tensor;
|
||||
using Eigen::RowMajor;
|
||||
|
||||
static void test_1d()
|
||||
{
|
||||
Tensor<int, 1> vec1(6);
|
||||
Tensor<int, 1, RowMajor> vec2(6);
|
||||
vec1(0) = 4; vec2(0) = 0;
|
||||
vec1(1) = 8; vec2(1) = 1;
|
||||
vec1(2) = 15; vec2(2) = 2;
|
||||
vec1(3) = 16; vec2(3) = 3;
|
||||
vec1(4) = 23; vec2(4) = 4;
|
||||
vec1(5) = 42; vec2(5) = 5;
|
||||
|
||||
int col_major[6];
|
||||
int row_major[6];
|
||||
memset(col_major, 0, 6*sizeof(int));
|
||||
memset(row_major, 0, 6*sizeof(int));
|
||||
TensorMap<Tensor<int, 1>> vec3(col_major, 6);
|
||||
TensorMap<Tensor<int, 1, RowMajor>> vec4(row_major, 6);
|
||||
|
||||
vec3 = vec1;
|
||||
vec4 = vec2;
|
||||
|
||||
VERIFY_IS_EQUAL(vec3(0), 4);
|
||||
VERIFY_IS_EQUAL(vec3(1), 8);
|
||||
VERIFY_IS_EQUAL(vec3(2), 15);
|
||||
VERIFY_IS_EQUAL(vec3(3), 16);
|
||||
VERIFY_IS_EQUAL(vec3(4), 23);
|
||||
VERIFY_IS_EQUAL(vec3(5), 42);
|
||||
|
||||
VERIFY_IS_EQUAL(vec4(0), 0);
|
||||
VERIFY_IS_EQUAL(vec4(1), 1);
|
||||
VERIFY_IS_EQUAL(vec4(2), 2);
|
||||
VERIFY_IS_EQUAL(vec4(3), 3);
|
||||
VERIFY_IS_EQUAL(vec4(4), 4);
|
||||
VERIFY_IS_EQUAL(vec4(5), 5);
|
||||
|
||||
vec1.setZero();
|
||||
vec2.setZero();
|
||||
vec1 = vec3;
|
||||
vec2 = vec4;
|
||||
|
||||
VERIFY_IS_EQUAL(vec1(0), 4);
|
||||
VERIFY_IS_EQUAL(vec1(1), 8);
|
||||
VERIFY_IS_EQUAL(vec1(2), 15);
|
||||
VERIFY_IS_EQUAL(vec1(3), 16);
|
||||
VERIFY_IS_EQUAL(vec1(4), 23);
|
||||
VERIFY_IS_EQUAL(vec1(5), 42);
|
||||
|
||||
VERIFY_IS_EQUAL(vec2(0), 0);
|
||||
VERIFY_IS_EQUAL(vec2(1), 1);
|
||||
VERIFY_IS_EQUAL(vec2(2), 2);
|
||||
VERIFY_IS_EQUAL(vec2(3), 3);
|
||||
VERIFY_IS_EQUAL(vec2(4), 4);
|
||||
VERIFY_IS_EQUAL(vec2(5), 5);
|
||||
}
|
||||
|
||||
static void test_2d()
|
||||
{
|
||||
Tensor<int, 2> mat1(2,3);
|
||||
Tensor<int, 2, RowMajor> mat2(2,3);
|
||||
|
||||
mat1(0,0) = 0;
|
||||
mat1(0,1) = 1;
|
||||
mat1(0,2) = 2;
|
||||
mat1(1,0) = 3;
|
||||
mat1(1,1) = 4;
|
||||
mat1(1,2) = 5;
|
||||
|
||||
mat2(0,0) = 0;
|
||||
mat2(0,1) = 1;
|
||||
mat2(0,2) = 2;
|
||||
mat2(1,0) = 3;
|
||||
mat2(1,1) = 4;
|
||||
mat2(1,2) = 5;
|
||||
|
||||
int col_major[6];
|
||||
int row_major[6];
|
||||
memset(col_major, 0, 6*sizeof(int));
|
||||
memset(row_major, 0, 6*sizeof(int));
|
||||
TensorMap<Tensor<int, 2>> mat3(row_major, 2, 3);
|
||||
TensorMap<Tensor<int, 2, RowMajor>> mat4(col_major, 2, 3);
|
||||
|
||||
mat3 = mat1;
|
||||
mat4 = mat2;
|
||||
|
||||
VERIFY_IS_EQUAL(mat3(0,0), 0);
|
||||
VERIFY_IS_EQUAL(mat3(0,1), 1);
|
||||
VERIFY_IS_EQUAL(mat3(0,2), 2);
|
||||
VERIFY_IS_EQUAL(mat3(1,0), 3);
|
||||
VERIFY_IS_EQUAL(mat3(1,1), 4);
|
||||
VERIFY_IS_EQUAL(mat3(1,2), 5);
|
||||
|
||||
VERIFY_IS_EQUAL(mat4(0,0), 0);
|
||||
VERIFY_IS_EQUAL(mat4(0,1), 1);
|
||||
VERIFY_IS_EQUAL(mat4(0,2), 2);
|
||||
VERIFY_IS_EQUAL(mat4(1,0), 3);
|
||||
VERIFY_IS_EQUAL(mat4(1,1), 4);
|
||||
VERIFY_IS_EQUAL(mat4(1,2), 5);
|
||||
|
||||
mat1.setZero();
|
||||
mat2.setZero();
|
||||
mat1 = mat3;
|
||||
mat2 = mat4;
|
||||
|
||||
VERIFY_IS_EQUAL(mat1(0,0), 0);
|
||||
VERIFY_IS_EQUAL(mat1(0,1), 1);
|
||||
VERIFY_IS_EQUAL(mat1(0,2), 2);
|
||||
VERIFY_IS_EQUAL(mat1(1,0), 3);
|
||||
VERIFY_IS_EQUAL(mat1(1,1), 4);
|
||||
VERIFY_IS_EQUAL(mat1(1,2), 5);
|
||||
|
||||
VERIFY_IS_EQUAL(mat2(0,0), 0);
|
||||
VERIFY_IS_EQUAL(mat2(0,1), 1);
|
||||
VERIFY_IS_EQUAL(mat2(0,2), 2);
|
||||
VERIFY_IS_EQUAL(mat2(1,0), 3);
|
||||
VERIFY_IS_EQUAL(mat2(1,1), 4);
|
||||
VERIFY_IS_EQUAL(mat2(1,2), 5);
|
||||
}
|
||||
|
||||
static void test_3d()
|
||||
{
|
||||
Tensor<int, 3> mat1(2,3,7);
|
||||
Tensor<int, 3, RowMajor> mat2(2,3,7);
|
||||
|
||||
int val = 0;
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (int j = 0; j < 3; ++j) {
|
||||
for (int k = 0; k < 7; ++k) {
|
||||
mat1(i,j,k) = val;
|
||||
mat2(i,j,k) = val;
|
||||
val++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int col_major[2*3*7];
|
||||
int row_major[2*3*7];
|
||||
memset(col_major, 0, 2*3*7*sizeof(int));
|
||||
memset(row_major, 0, 2*3*7*sizeof(int));
|
||||
TensorMap<Tensor<int, 3>> mat3(col_major, 2, 3, 7);
|
||||
TensorMap<Tensor<int, 3, RowMajor>> mat4(row_major, 2, 3, 7);
|
||||
|
||||
mat3 = mat1;
|
||||
mat4 = mat2;
|
||||
|
||||
val = 0;
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (int j = 0; j < 3; ++j) {
|
||||
for (int k = 0; k < 7; ++k) {
|
||||
VERIFY_IS_EQUAL(mat3(i,j,k), val);
|
||||
VERIFY_IS_EQUAL(mat4(i,j,k), val);
|
||||
val++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mat1.setZero();
|
||||
mat2.setZero();
|
||||
mat1 = mat3;
|
||||
mat2 = mat4;
|
||||
|
||||
val = 0;
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (int j = 0; j < 3; ++j) {
|
||||
for (int k = 0; k < 7; ++k) {
|
||||
VERIFY_IS_EQUAL(mat1(i,j,k), val);
|
||||
VERIFY_IS_EQUAL(mat2(i,j,k), val);
|
||||
val++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void test_cxx11_tensor_assign()
|
||||
{
|
||||
CALL_SUBTEST(test_1d());
|
||||
CALL_SUBTEST(test_2d());
|
||||
CALL_SUBTEST(test_3d());
|
||||
}
|
unsupported/test/cxx11_tensor_expr.cpp (new file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#include "main.h"
|
||||
|
||||
#include <Eigen/CXX11/Tensor>
|
||||
|
||||
using Eigen::Tensor;
|
||||
using Eigen::RowMajor;
|
||||
|
||||
static void test_1d()
|
||||
{
|
||||
Tensor<float, 1> vec1({6});
|
||||
Tensor<float, 1, RowMajor> vec2({6});
|
||||
|
||||
vec1(0) = 4.0; vec2(0) = 0.0;
|
||||
vec1(1) = 8.0; vec2(1) = 1.0;
|
||||
vec1(2) = 15.0; vec2(2) = 2.0;
|
||||
vec1(3) = 16.0; vec2(3) = 3.0;
|
||||
vec1(4) = 23.0; vec2(4) = 4.0;
|
||||
vec1(5) = 42.0; vec2(5) = 5.0;
|
||||
|
||||
float data3[6];
|
||||
TensorMap<Tensor<float, 1>> vec3(data3, 6);
|
||||
vec3 = vec1.cwiseSqrt();
|
||||
float data4[6];
|
||||
TensorMap<Tensor<float, 1, RowMajor>> vec4(data4, 6);
|
||||
vec4 = vec2.cwiseSqrt();
|
||||
|
||||
VERIFY_IS_APPROX(vec3(0), sqrtf(4.0));
|
||||
VERIFY_IS_APPROX(vec3(1), sqrtf(8.0));
|
||||
VERIFY_IS_APPROX(vec3(2), sqrtf(15.0));
|
||||
VERIFY_IS_APPROX(vec3(3), sqrtf(16.0));
|
||||
VERIFY_IS_APPROX(vec3(4), sqrtf(23.0));
|
||||
VERIFY_IS_APPROX(vec3(5), sqrtf(42.0));
|
||||
|
||||
VERIFY_IS_APPROX(vec4(0), sqrtf(0.0));
|
||||
VERIFY_IS_APPROX(vec4(1), sqrtf(1.0));
|
||||
VERIFY_IS_APPROX(vec4(2), sqrtf(2.0));
|
||||
VERIFY_IS_APPROX(vec4(3), sqrtf(3.0));
|
||||
VERIFY_IS_APPROX(vec4(4), sqrtf(4.0));
|
||||
VERIFY_IS_APPROX(vec4(5), sqrtf(5.0));
|
||||
|
||||
vec3 = vec1 + vec2;
|
||||
VERIFY_IS_APPROX(vec3(0), 4.0f + 0.0f);
|
||||
VERIFY_IS_APPROX(vec3(1), 8.0f + 1.0f);
|
||||
VERIFY_IS_APPROX(vec3(2), 15.0f + 2.0f);
|
||||
VERIFY_IS_APPROX(vec3(3), 16.0f + 3.0f);
|
||||
VERIFY_IS_APPROX(vec3(4), 23.0f + 4.0f);
|
||||
VERIFY_IS_APPROX(vec3(5), 42.0f + 5.0f);
|
||||
}
|
||||
|
||||
static void test_2d()
|
||||
{
|
||||
float data1[6];
|
||||
TensorMap<Tensor<float, 2>> mat1(data1, 2, 3);
|
||||
float data2[6];
|
||||
TensorMap<Tensor<float, 2, RowMajor>> mat2(data2, 2, 3);
|
||||
|
||||
mat1(0,0) = 0.0;
|
||||
mat1(0,1) = 1.0;
|
||||
mat1(0,2) = 2.0;
|
||||
mat1(1,0) = 3.0;
|
||||
mat1(1,1) = 4.0;
|
||||
mat1(1,2) = 5.0;
|
||||
|
||||
mat2(0,0) = -0.0;
|
||||
mat2(0,1) = -1.0;
|
||||
mat2(0,2) = -2.0;
|
||||
mat2(1,0) = -3.0;
|
||||
mat2(1,1) = -4.0;
|
||||
mat2(1,2) = -5.0;
|
||||
|
||||
Tensor<float, 2> mat3(2,3);
|
||||
Tensor<float, 2, RowMajor> mat4(2,3);
|
||||
mat3 = mat1.cwiseAbs();
|
||||
mat4 = mat2.cwiseAbs();
|
||||
|
||||
VERIFY_IS_APPROX(mat3(0,0), 0.0f);
|
||||
VERIFY_IS_APPROX(mat3(0,1), 1.0f);
|
||||
VERIFY_IS_APPROX(mat3(0,2), 2.0f);
|
||||
VERIFY_IS_APPROX(mat3(1,0), 3.0f);
|
||||
VERIFY_IS_APPROX(mat3(1,1), 4.0f);
|
||||
VERIFY_IS_APPROX(mat3(1,2), 5.0f);
|
||||
|
||||
VERIFY_IS_APPROX(mat4(0,0), 0.0f);
|
||||
VERIFY_IS_APPROX(mat4(0,1), 1.0f);
|
||||
VERIFY_IS_APPROX(mat4(0,2), 2.0f);
|
||||
VERIFY_IS_APPROX(mat4(1,0), 3.0f);
|
||||
VERIFY_IS_APPROX(mat4(1,1), 4.0f);
|
||||
VERIFY_IS_APPROX(mat4(1,2), 5.0f);
|
||||
}
|
||||
|
||||
static void test_3d()
|
||||
{
|
||||
Tensor<float, 3> mat1(2,3,7);
|
||||
Tensor<float, 3, RowMajor> mat2(2,3,7);
|
||||
|
||||
float val = 0.0;
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (int j = 0; j < 3; ++j) {
|
||||
for (int k = 0; k < 7; ++k) {
|
||||
mat1(i,j,k) = val;
|
||||
mat2(i,j,k) = val;
|
||||
val += 1.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Tensor<float, 3> mat3(2,3,7);
|
||||
mat3 = mat1 + mat1;
|
||||
Tensor<float, 3, RowMajor> mat4(2,3,7);
|
||||
mat4 = mat2 * 3.14f;
|
||||
Tensor<float, 3> mat5(2,3,7);
|
||||
mat5 = mat1.cwiseSqrt().cwiseSqrt();
|
||||
Tensor<float, 3, RowMajor> mat6(2,3,7);
|
||||
mat6 = mat2.cwiseSqrt() * 3.14f;
|
||||
|
||||
val = 0.0;
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (int j = 0; j < 3; ++j) {
|
||||
for (int k = 0; k < 7; ++k) {
|
||||
VERIFY_IS_APPROX(mat3(i,j,k), val + val);
|
||||
VERIFY_IS_APPROX(mat4(i,j,k), val * 3.14f);
|
||||
VERIFY_IS_APPROX(mat5(i,j,k), sqrtf(sqrtf(val)));
|
||||
VERIFY_IS_APPROX(mat6(i,j,k), sqrtf(val) * 3.14f);
|
||||
val += 1.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void test_cxx11_tensor_expr()
|
||||
{
|
||||
CALL_SUBTEST(test_1d());
|
||||
CALL_SUBTEST(test_2d());
|
||||
CALL_SUBTEST(test_3d());
|
||||
}
|
unsupported/test/cxx11_tensor_fixed_size.cpp (new file, 167 lines)
@@ -0,0 +1,167 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include "main.h"

#include <Eigen/CXX11/Tensor>

using Eigen::Tensor;
using Eigen::RowMajor;


static void test_1d()
{
  TensorFixedSize<float, Sizes<6> > vec1;
  TensorFixedSize<float, Sizes<6>, RowMajor> vec2;

  VERIFY_IS_EQUAL((vec1.size()), 6);
  // VERIFY_IS_EQUAL((vec1.dimensions()[0]), 6);
  // VERIFY_IS_EQUAL((vec1.dimension(0)), 6);

  vec1(0) = 4.0; vec2(0) = 0.0;
  vec1(1) = 8.0; vec2(1) = 1.0;
  vec1(2) = 15.0; vec2(2) = 2.0;
  vec1(3) = 16.0; vec2(3) = 3.0;
  vec1(4) = 23.0; vec2(4) = 4.0;
  vec1(5) = 42.0; vec2(5) = 5.0;

  float data3[6];
  TensorMap<TensorFixedSize<float, Sizes<6> > > vec3(data3, 6);
  vec3 = vec1.cwiseSqrt();
  float data4[6];
  TensorMap<TensorFixedSize<float, Sizes<6>, RowMajor> > vec4(data4, 6);
  vec4 = vec2.cwiseSqrt();

  VERIFY_IS_EQUAL((vec3.size()), 6);
  // VERIFY_IS_EQUAL((vec3.dimensions()[0]), 6);
  // VERIFY_IS_EQUAL((vec3.dimension(0)), 6);

  VERIFY_IS_APPROX(vec3(0), sqrtf(4.0));
  VERIFY_IS_APPROX(vec3(1), sqrtf(8.0));
  VERIFY_IS_APPROX(vec3(2), sqrtf(15.0));
  VERIFY_IS_APPROX(vec3(3), sqrtf(16.0));
  VERIFY_IS_APPROX(vec3(4), sqrtf(23.0));
  VERIFY_IS_APPROX(vec3(5), sqrtf(42.0));

  VERIFY_IS_APPROX(vec4(0), sqrtf(0.0));
  VERIFY_IS_APPROX(vec4(1), sqrtf(1.0));
  VERIFY_IS_APPROX(vec4(2), sqrtf(2.0));
  VERIFY_IS_APPROX(vec4(3), sqrtf(3.0));
  VERIFY_IS_APPROX(vec4(4), sqrtf(4.0));
  VERIFY_IS_APPROX(vec4(5), sqrtf(5.0));

  vec3 = vec1 + vec2;
  VERIFY_IS_APPROX(vec3(0), 4.0f + 0.0f);
  VERIFY_IS_APPROX(vec3(1), 8.0f + 1.0f);
  VERIFY_IS_APPROX(vec3(2), 15.0f + 2.0f);
  VERIFY_IS_APPROX(vec3(3), 16.0f + 3.0f);
  VERIFY_IS_APPROX(vec3(4), 23.0f + 4.0f);
  VERIFY_IS_APPROX(vec3(5), 42.0f + 5.0f);
}

static void test_2d()
{
  float data1[6];
  TensorMap<TensorFixedSize<float, Sizes<2, 3> >> mat1(data1,2,3);
  float data2[6];
  TensorMap<TensorFixedSize<float, Sizes<2, 3>, RowMajor>> mat2(data2,2,3);

  VERIFY_IS_EQUAL((mat1.size()), 2*3);
  // VERIFY_IS_EQUAL((mat1.dimension(0)), 2);
  // VERIFY_IS_EQUAL((mat1.dimension(1)), 3);

  mat1(0,0) = 0.0;
  mat1(0,1) = 1.0;
  mat1(0,2) = 2.0;
  mat1(1,0) = 3.0;
  mat1(1,1) = 4.0;
  mat1(1,2) = 5.0;

  mat2(0,0) = -0.0;
  mat2(0,1) = -1.0;
  mat2(0,2) = -2.0;
  mat2(1,0) = -3.0;
  mat2(1,1) = -4.0;
  mat2(1,2) = -5.0;

  TensorFixedSize<float, Sizes<2, 3>> mat3;
  TensorFixedSize<float, Sizes<2, 3>, RowMajor> mat4;
  mat3 = mat1.cwiseAbs();
  mat4 = mat2.cwiseAbs();

  VERIFY_IS_EQUAL((mat3.size()), 2*3);
  // VERIFY_IS_EQUAL((mat3.dimension(0)), 2);
  // VERIFY_IS_EQUAL((mat3.dimension(1)), 3);

  VERIFY_IS_APPROX(mat3(0,0), 0.0f);
  VERIFY_IS_APPROX(mat3(0,1), 1.0f);
  VERIFY_IS_APPROX(mat3(0,2), 2.0f);
  VERIFY_IS_APPROX(mat3(1,0), 3.0f);
  VERIFY_IS_APPROX(mat3(1,1), 4.0f);
  VERIFY_IS_APPROX(mat3(1,2), 5.0f);

  VERIFY_IS_APPROX(mat4(0,0), 0.0f);
  VERIFY_IS_APPROX(mat4(0,1), 1.0f);
  VERIFY_IS_APPROX(mat4(0,2), 2.0f);
  VERIFY_IS_APPROX(mat4(1,0), 3.0f);
  VERIFY_IS_APPROX(mat4(1,1), 4.0f);
  VERIFY_IS_APPROX(mat4(1,2), 5.0f);
}

static void test_3d()
{
  TensorFixedSize<float, Sizes<2, 3, 7> > mat1;
  TensorFixedSize<float, Sizes<2, 3, 7>, RowMajor> mat2;

  VERIFY_IS_EQUAL((mat1.size()), 2*3*7);
  // VERIFY_IS_EQUAL((mat1.dimension(0)), 2);
  // VERIFY_IS_EQUAL((mat1.dimension(1)), 3);
  // VERIFY_IS_EQUAL((mat1.dimension(2)), 7);

  float val = 0.0;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 7; ++k) {
        mat1(i,j,k) = val;
        mat2(i,j,k) = val;
        val += 1.0;
      }
    }
  }

  TensorFixedSize<float, Sizes<2, 3, 7> > mat3;
  mat3 = mat1.cwiseSqrt();
  TensorFixedSize<float, Sizes<2, 3, 7>, RowMajor> mat4;
  mat4 = mat2.cwiseSqrt();

  VERIFY_IS_EQUAL((mat3.size()), 2*3*7);
  // VERIFY_IS_EQUAL((mat3.dimension(0)), 2);
  // VERIFY_IS_EQUAL((mat3.dimension(1)), 3);
  // VERIFY_IS_EQUAL((mat3.dimension(2)), 7);


  val = 0.0;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 7; ++k) {
        VERIFY_IS_APPROX(mat3(i,j,k), sqrtf(val));
        VERIFY_IS_APPROX(mat4(i,j,k), sqrtf(val));
        val += 1.0;
      }
    }
  }
}


void test_cxx11_tensor_fixed_size()
{
  CALL_SUBTEST(test_1d());
  CALL_SUBTEST(test_2d());
  CALL_SUBTEST(test_3d());
}
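The new TensorFixedSize class tested above carries all of its dimensions in the Sizes<...> template argument, so no sizes are passed at run time and the same coefficient-wise expressions apply as for the dynamic Tensor class. A minimal sketch of that usage follows; it is illustrative only and not part of this commit, the file name fixed_size_demo.cpp is an assumption, and it sticks to the operations demonstrated in the test above.

// fixed_size_demo.cpp -- illustrative sketch, not part of this commit.
// Build with the unsupported/ include directory on the include path, as for the tests above.
#include <Eigen/CXX11/Tensor>

int main()
{
  // Dimensions live in the type (2x3, known at compile time), so no constructor arguments.
  Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3> > a;
  Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3> > b;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      a(i,j) = static_cast<float>(i*3 + j);
      b(i,j) = -a(i,j);
    }
  }

  Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3> > c;
  c = b.cwiseAbs();   // same expression API as the dynamic Tensor class

  // size() reflects the compile-time shape; cwiseAbs undoes the negation above.
  return (c.size() == 2*3 && c(1,2) == a(1,2)) ? 0 : 1;
}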
142  unsupported/test/cxx11_tensor_map.cpp  Normal file
@ -0,0 +1,142 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include "main.h"

#include <Eigen/CXX11/Tensor>

using Eigen::Tensor;
using Eigen::RowMajor;

static void test_1d()
{
  Tensor<int, 1> vec1(6);
  Tensor<int, 1, RowMajor> vec2(6);

  TensorMap<Tensor<const int, 1>> vec3(vec1.data(), 6);
  TensorMap<Tensor<const int, 1, RowMajor>> vec4(vec2.data(), 6);

  vec1(0) = 4; vec2(0) = 0;
  vec1(1) = 8; vec2(1) = 1;
  vec1(2) = 15; vec2(2) = 2;
  vec1(3) = 16; vec2(3) = 3;
  vec1(4) = 23; vec2(4) = 4;
  vec1(5) = 42; vec2(5) = 5;

  VERIFY_IS_EQUAL(vec1.size(), 6);
  VERIFY_IS_EQUAL(vec1.dimension(0), 6);

  VERIFY_IS_EQUAL(vec3(0), 4);
  VERIFY_IS_EQUAL(vec3(1), 8);
  VERIFY_IS_EQUAL(vec3(2), 15);
  VERIFY_IS_EQUAL(vec3(3), 16);
  VERIFY_IS_EQUAL(vec3(4), 23);
  VERIFY_IS_EQUAL(vec3(5), 42);

  VERIFY_IS_EQUAL(vec4(0), 0);
  VERIFY_IS_EQUAL(vec4(1), 1);
  VERIFY_IS_EQUAL(vec4(2), 2);
  VERIFY_IS_EQUAL(vec4(3), 3);
  VERIFY_IS_EQUAL(vec4(4), 4);
  VERIFY_IS_EQUAL(vec4(5), 5);
}

static void test_2d()
{
  Tensor<int, 2> mat1(2,3);
  Tensor<int, 2, RowMajor> mat2(2,3);

  mat1(0,0) = 0;
  mat1(0,1) = 1;
  mat1(0,2) = 2;
  mat1(1,0) = 3;
  mat1(1,1) = 4;
  mat1(1,2) = 5;

  mat2(0,0) = 0;
  mat2(0,1) = 1;
  mat2(0,2) = 2;
  mat2(1,0) = 3;
  mat2(1,1) = 4;
  mat2(1,2) = 5;

  TensorMap<Tensor<const int, 2>> mat3(mat1.data(), 2, 3);
  TensorMap<Tensor<const int, 2, RowMajor>> mat4(mat2.data(), 2, 3);

  VERIFY_IS_EQUAL(mat3.size(), 6);
  VERIFY_IS_EQUAL(mat3.dimension(0), 2);
  VERIFY_IS_EQUAL(mat3.dimension(1), 3);

  VERIFY_IS_EQUAL(mat4.size(), 6);
  VERIFY_IS_EQUAL(mat4.dimension(0), 2);
  VERIFY_IS_EQUAL(mat4.dimension(1), 3);

  VERIFY_IS_EQUAL(mat3(0,0), 0);
  VERIFY_IS_EQUAL(mat3(0,1), 1);
  VERIFY_IS_EQUAL(mat3(0,2), 2);
  VERIFY_IS_EQUAL(mat3(1,0), 3);
  VERIFY_IS_EQUAL(mat3(1,1), 4);
  VERIFY_IS_EQUAL(mat3(1,2), 5);

  VERIFY_IS_EQUAL(mat4(0,0), 0);
  VERIFY_IS_EQUAL(mat4(0,1), 1);
  VERIFY_IS_EQUAL(mat4(0,2), 2);
  VERIFY_IS_EQUAL(mat4(1,0), 3);
  VERIFY_IS_EQUAL(mat4(1,1), 4);
  VERIFY_IS_EQUAL(mat4(1,2), 5);
}

static void test_3d()
{
  Tensor<int, 3> mat1(2,3,7);
  Tensor<int, 3, RowMajor> mat2(2,3,7);

  int val = 0;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 7; ++k) {
        mat1(i,j,k) = val;
        mat2(i,j,k) = val;
        val++;
      }
    }
  }

  TensorMap<Tensor<const int, 3>> mat3(mat1.data(), 2, 3, 7);
  TensorMap<Tensor<const int, 3, RowMajor>> mat4(mat2.data(), 2, 3, 7);

  VERIFY_IS_EQUAL(mat3.size(), 2*3*7);
  VERIFY_IS_EQUAL(mat3.dimension(0), 2);
  VERIFY_IS_EQUAL(mat3.dimension(1), 3);
  VERIFY_IS_EQUAL(mat3.dimension(2), 7);

  VERIFY_IS_EQUAL(mat4.size(), 2*3*7);
  VERIFY_IS_EQUAL(mat4.dimension(0), 2);
  VERIFY_IS_EQUAL(mat4.dimension(1), 3);
  VERIFY_IS_EQUAL(mat4.dimension(2), 7);

  val = 0;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 7; ++k) {
        VERIFY_IS_EQUAL(mat3(i,j,k), val);
        VERIFY_IS_EQUAL(mat4(i,j,k), val);
        val++;
      }
    }
  }
}


void test_cxx11_tensor_map()
{
  CALL_SUBTEST(test_1d());
  CALL_SUBTEST(test_2d());
  CALL_SUBTEST(test_3d());
}
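TensorMap, as verified above, wraps memory the caller already owns and exposes tensor-style indexing and dimension queries over it without copying. A minimal sketch of the const-map pattern used in this test follows; it is illustrative only and not part of this commit, and the file name tensor_map_demo.cpp and the buffer contents are assumptions.

// tensor_map_demo.cpp -- illustrative sketch, not part of this commit.
// Build with the unsupported/ include directory on the include path, as for the tests above.
#include <Eigen/CXX11/Tensor>

int main()
{
  int data[6] = {4, 8, 15, 16, 23, 42};

  // Read-only 1-d view over caller-owned storage; no copy is made.
  Eigen::TensorMap<Eigen::Tensor<const int, 1> > view1(data, 6);
  // The same buffer viewed as a 2x3 tensor.
  Eigen::TensorMap<Eigen::Tensor<const int, 2> > view2(data, 2, 3);

  return (view1.size() == 6 &&
          view1(5) == 42 &&
          view2.dimension(0) == 2 &&
          view2.dimension(1) == 3) ? 0 : 1;
}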