Require recent GCC and MSVC and remove EIGEN_HAS_CXX14 and some other feature test macros

Erik Schultheis 2021-12-01 00:48:34 +00:00 committed by Rasmus Munk Larsen
parent 085c2fc5d5
commit ec2fd0f7ed
39 changed files with 52 additions and 702 deletions

View File

@ -26,7 +26,7 @@ if(CMAKE_VERSION VERSION_LESS 3.21.0)
endif()
endif()
set(CMAKE_CXX_STANDARD 11 CACHE STRING "Default C++ standard")
set(CMAKE_CXX_STANDARD 14 CACHE STRING "Default C++ standard")
set(CMAKE_CXX_STANDARD_REQUIRED ON CACHE BOOL "Require C++ standard")
set(CMAKE_CXX_EXTENSIONS OFF CACHE BOOL "Allow C++ extensions")

View File

@ -36,7 +36,7 @@
// Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3)
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6) && EIGEN_GNUC_AT_MOST(5,5)
#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_MOST(5,5)
#pragma GCC optimize ("-fno-ipa-cp-clone")
#endif

View File

@ -62,7 +62,7 @@ struct plain_array
#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
#elif EIGEN_GNUC_AT_LEAST(4,7)
#elif EIGEN_COMP_GNUC
// GCC 4.7 is too aggressive in its optimizations and removes the alignment test based on the fact that the array is declared to be aligned.
// See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900
// Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined:
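For illustration, a minimal self-contained sketch of the pointer-laundering trick the comment above describes (demo names are hypothetical, not Eigen's):
#include <cassert>
#include <cstdint>
// Returning the pointer through a helper call hides its origin from the optimizer,
// so GCC can no longer prove alignment and delete the runtime assert.
template <typename Ptr>
inline Ptr demo_hide_origin(Ptr p) { return p; }
inline void demo_assert_aligned(const void* data, std::uintptr_t mask) {
  assert((reinterpret_cast<std::uintptr_t>(demo_hide_origin(data)) & mask) == 0);
}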

View File

@ -116,10 +116,9 @@ namespace Eigen
#else
template <typename Derived,typename ScalarExponent>
EIGEN_DEVICE_FUNC inline
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(
const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<typename Derived::Scalar
EIGEN_COMMA ScalarExponent EIGEN_COMMA
EIGEN_SCALAR_BINARY_SUPPORTED(pow,typename Derived::Scalar,ScalarExponent)>::type,pow))
EIGEN_SCALAR_BINARY_SUPPORTED(pow,typename Derived::Scalar,ScalarExponent)>::type,pow)
pow(const Eigen::ArrayBase<Derived>& x, const ScalarExponent& exponent)
{
typedef typename internal::promote_scalar_arg<typename Derived::Scalar,ScalarExponent,
@ -170,10 +169,9 @@ namespace Eigen
#else
template <typename Scalar, typename Derived>
EIGEN_DEVICE_FUNC inline
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(
const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<typename Derived::Scalar
EIGEN_COMMA Scalar EIGEN_COMMA
EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar)>::type,Derived,pow))
EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar)>::type,Derived,pow)
pow(const Scalar& x, const Eigen::ArrayBase<Derived>& exponents) {
typedef typename internal::promote_scalar_arg<typename Derived::Scalar,Scalar,
EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar)>::type PromotedScalar;

View File

@ -21,15 +21,6 @@
namespace Eigen {
// On WINCE, std::abs is defined for int only, so let's define our own overloads:
// This issue has been confirmed with MSVC 2008 only, but the issue might exist for more recent versions too.
#if EIGEN_OS_WINCE && EIGEN_COMP_MSVC && EIGEN_COMP_MSVC<=1500
long abs(long x) { return (labs(x)); }
double abs(double x) { return (fabs(x)); }
float abs(float x) { return (fabsf(x)); }
long double abs(long double x) { return (fabsl(x)); }
#endif
namespace internal {
/** \internal \class global_math_functions_filtering_base
@ -963,7 +954,7 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
// Implementation of is* functions
// std::is* do not work with fast-math and gcc; std::is* are available on MSVC 2013 and newer, as well as in clang.
#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG)
#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC) || (EIGEN_COMP_CLANG)
#define EIGEN_USE_STD_FPCLASSIFY 1
#else
#define EIGEN_USE_STD_FPCLASSIFY 0
@ -1049,7 +1040,7 @@ EIGEN_DEVICE_FUNC inline bool isinf_impl(const float& x) { return isinf_ms
#elif (defined __FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ && EIGEN_COMP_GNUC)
#if EIGEN_GNUC_AT_LEAST(5,0)
#if EIGEN_COMP_GNUC
#define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((optimize("no-finite-math-only")))
#else
// NOTE the inline qualifier and noinline attribute are both needed: the former is to avoid linking issue (duplicate symbol),
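A hedged sketch of the pattern this NOTE describes (demo names, not Eigen's): testing the bit pattern keeps the check meaningful even under -ffinite-math-only, 'inline' gives the function vague linkage so multiple translation units can define it, and the noinline attribute keeps optimizing callers from folding the call away.
#include <cstdint>
#include <cstring>
inline __attribute__((noinline)) bool demo_isnan_bits(float x) {
  std::uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));        // type-pun without UB
  return (bits & 0x7fffffffu) > 0x7f800000u;   // exponent all ones, mantissa non-zero => NaN
}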

View File

@ -621,7 +621,7 @@ inline float trig_reduce_huge (float xf, int *quadrant)
template<bool ComputeSine,typename Packet>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED
#if EIGEN_GNUC_AT_LEAST(4,4) && EIGEN_COMP_GNUC_STRICT
#if EIGEN_COMP_GNUC_STRICT
__attribute__((optimize("-fno-unsafe-math-optimizations")))
#endif
Packet psincos_float(const Packet& _x)

View File

@ -247,18 +247,9 @@ template<> struct scalar_div_cost<float,true> { enum { value = 7 }; };
template<> struct scalar_div_cost<double,true> { enum { value = 8 }; };
#endif
#if EIGEN_COMP_MSVC==1500
// Workaround MSVC 9 internal compiler error.
// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode
// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. (it crashed on pset1(0)).
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set_ps1(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
#endif
template<> EIGEN_STRONG_INLINE Packet16b pset1<Packet16b>(const bool& from) { return _mm_set1_epi8(static_cast<char>(from)); }
template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }
@ -721,15 +712,7 @@ template<> EIGEN_STRONG_INLINE Packet16b pload<Packet16b>(const bool* from)
#if EIGEN_COMP_MSVC
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
EIGEN_DEBUG_UNALIGNED_LOAD
#if (EIGEN_COMP_MSVC==1600)
// NOTE Some version of MSVC10 generates bad code when using _mm_loadu_ps
// (i.e., it does not generate an unaligned load!)
__m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
res = _mm_loadh_pi(res, (const __m64*)(from+2));
return res;
#else
return _mm_loadu_ps(from);
#endif
}
#else
// NOTE: with the code below, MSVC's compiler crashes!

View File

@ -154,7 +154,7 @@ template<typename Functor> struct functor_has_linear_access { enum { ret = !has_
// For unreliable compilers, let's specialize the has_*ary_operator
// helpers so that at least built-in nullary functors work fine.
#if !( (EIGEN_COMP_MSVC>1600) || (EIGEN_GNUC_AT_LEAST(4,8)) || (EIGEN_COMP_ICC>=1600))
#if !( EIGEN_COMP_MSVC || EIGEN_COMP_GNUC || (EIGEN_COMP_ICC>=1600))
template<typename Scalar,typename IndexType>
struct has_nullary_operator<scalar_constant_op<Scalar>,IndexType> { enum { value = 1}; };
template<typename Scalar,typename IndexType>

View File

@ -30,27 +30,13 @@
*
* If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
* vectorized and non-vectorized code.
*
* FIXME: this code can be cleaned up once we switch to proper C++11 only.
*/
#if (defined EIGEN_CUDACC)
#define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)
#define EIGEN_ALIGNOF(x) __alignof(x)
#elif EIGEN_HAS_ALIGNAS
#else
#define EIGEN_ALIGN_TO_BOUNDARY(n) alignas(n)
#define EIGEN_ALIGNOF(x) alignof(x)
#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM
#define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
#define EIGEN_ALIGNOF(x) __alignof(x)
#elif EIGEN_COMP_MSVC
#define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
#define EIGEN_ALIGNOF(x) __alignof(x)
#elif EIGEN_COMP_SUNCC
// FIXME not sure about this one:
#define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
#define EIGEN_ALIGNOF(x) __alignof(x)
#else
#error Please tell me what is the equivalent of alignas(n) and alignof(x) for your compiler
#endif
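After this change the two macros reduce to the standard C++11 keywords on every host compiler (CUDA device code keeps __align__); a quick sketch of what they now expand to:
struct alignas(16) DemoPacket { float v[4]; };                  // EIGEN_ALIGN_TO_BOUNDARY(16)
static_assert(alignof(DemoPacket) == 16, "alignment honored");  // EIGEN_ALIGNOF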
// If the user explicitly disables vectorization, then we also disable alignment
@ -200,14 +186,12 @@
// removed as gcc 4.1 and msvc 2008 are not supported anyways.
#if EIGEN_COMP_MSVC
#include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
#if (EIGEN_COMP_MSVC >= 1500) // 2008 or later
// a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
#if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64
#define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
#endif
// a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
#if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64
#define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
#endif
#else
#if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) )
#if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_COMP_GNUC )
#define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
#endif
#endif

View File

@ -129,10 +129,6 @@
// For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC:
// name ver MSC_VER
// 2008 9 1500
// 2010 10 1600
// 2012 11 1700
// 2013 12 1800
// 2015 14 1900
// "15" 15 1900
// 2017-14.1 15.0 1910
@ -140,6 +136,9 @@
// 2017-14.12 15.5 1912
// 2017-14.13 15.6 1913
// 2017-14.14 15.7 1914
// 2017 15.8 1915
// 2017 15.9 1916
// 2019 RTW 16.0 1920
/// \internal EIGEN_COMP_MSVC_LANG set to _MSVC_LANG if the compiler is Microsoft Visual C++, 0 otherwise.
#if defined(_MSVC_LANG)
@ -581,16 +580,6 @@
# define __has_feature(x) 0
#endif
// Some old compilers do not support template specializations like:
// template<typename T,int N> void foo(const T x[N]);
#if !( EIGEN_COMP_CLANG && ( (EIGEN_COMP_CLANG<309) \
|| (defined(__apple_build_version__) && (__apple_build_version__ < 9000000))) \
|| EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<49)
#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 1
#else
#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 0
#endif
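The removed macro probed for array-bound deduction from a reference-to-array parameter, which every still-supported compiler accepts; a minimal sketch of the construct (hypothetical name):
template <typename T, int N>
constexpr int demo_array_size(const T (&)[N]) { return N; }
static_assert(demo_array_size("abc") == 4, "N deduced, including the terminating NUL");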
// The macro EIGEN_CPLUSPLUS is a replacement for __cplusplus/_MSVC_LANG that
// works for both platforms, indicating the C++ standard version number.
//
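A sketch of the portable check the comment describes: MSVC keeps __cplusplus at 199711L unless /Zc:__cplusplus is passed, but always reports the real level in _MSVC_LANG, so a cross-platform version test looks like
#if defined(_MSVC_LANG)
#define DEMO_CPLUSPLUS _MSVC_LANG
#else
#define DEMO_CPLUSPLUS __cplusplus
#endif
static_assert(DEMO_CPLUSPLUS >= 201402L, "C++14 or newer required");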
@ -624,7 +613,7 @@
#endif
#ifndef EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
#if defined(__cpp_variable_templates) && __cpp_variable_templates >= 201304 && EIGEN_MAX_CPP_VER>=14
#if defined(__cpp_variable_templates) && __cpp_variable_templates >= 201304
#define EIGEN_HAS_CXX14_VARIABLE_TEMPLATES 1
#else
#define EIGEN_HAS_CXX14_VARIABLE_TEMPLATES 0
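What __cpp_variable_templates gates, sketched with a hypothetical constant:
template <typename T>
constexpr T demo_pi = T(3.141592653589793238L);
static_assert(demo_pi<float> > 3.0f, "usable in constant expressions");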
@ -636,17 +625,13 @@
// but in practice we should not rely on them but rather on the availability of
// individual features as defined later.
// This is why there is no EIGEN_HAS_CXX17.
// FIXME: get rid of EIGEN_HAS_CXX14.
#if EIGEN_MAX_CPP_VER<11 || EIGEN_COMP_CXXVER<11 || (EIGEN_COMP_MSVC && EIGEN_COMP_MSVC < 1700) || (EIGEN_COMP_ICC && EIGEN_COMP_ICC < 1400)
#if EIGEN_MAX_CPP_VER<14 || EIGEN_COMP_CXXVER<14 || (EIGEN_COMP_MSVC && EIGEN_COMP_MSVC < 1900) || \
(EIGEN_COMP_ICC && EIGEN_COMP_ICC < 1500) || (EIGEN_COMP_NVCC && EIGEN_COMP_NVCC < 80000) || \
(EIGEN_COMP_CLANG && ((EIGEN_COMP_CLANG<309) || (defined(__apple_build_version__) && (__apple_build_version__ < 9000000)))) || \
(EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<51)
#error This compiler appears to be too old to be supported by Eigen
#endif
#if EIGEN_MAX_CPP_VER>=14 && EIGEN_COMP_CXXVER>=14
#define EIGEN_HAS_CXX14 1
#else
#define EIGEN_HAS_CXX14 0
#endif
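Downstream code that branched on the removed macro can test the standard level directly; a hedged migration sketch (not part of the commit):
#if EIGEN_COMP_CXXVER >= 14   // was: #if EIGEN_HAS_CXX14
// C++14-only code path
#endif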
// Does the compiler support C99?
// Need to include <cmath> to make sure _GLIBCXX_USE_C99 gets defined
#include <cmath>
@ -654,7 +639,7 @@
#if ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)) \
|| (defined(__GNUC__) && defined(_GLIBCXX_USE_C99)) \
|| (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) \
|| (EIGEN_COMP_MSVC >= 1900) || defined(SYCL_DEVICE_ONLY))
|| (EIGEN_COMP_MSVC) || defined(SYCL_DEVICE_ONLY))
#define EIGEN_HAS_C99_MATH 1
#else
#define EIGEN_HAS_C99_MATH 0
@ -690,27 +675,11 @@
#endif
#endif
#ifndef EIGEN_HAS_ALIGNAS
#if ( __has_feature(cxx_alignas) \
|| EIGEN_HAS_CXX14 \
|| (EIGEN_COMP_MSVC >= 1800) \
|| (EIGEN_GNUC_AT_LEAST(4,8)) \
|| (EIGEN_COMP_CLANG>=305) \
|| (EIGEN_COMP_ICC>=1500) \
|| (EIGEN_COMP_PGI>=1500) \
|| (EIGEN_COMP_SUNCC>=0x5130))
#define EIGEN_HAS_ALIGNAS 1
#else
#define EIGEN_HAS_ALIGNAS 0
#endif
#endif
// Does the compiler support type_traits?
// - full support of type traits was added only to GCC 5.1.0.
// - 20150626 corresponds to the last release of 4.x libstdc++
#ifndef EIGEN_HAS_TYPE_TRAITS
#if ((!EIGEN_COMP_GNUC_STRICT) || EIGEN_GNUC_AT_LEAST(5, 1)) \
&& ((!defined(__GLIBCXX__)) || __GLIBCXX__ > 20150626)
#if (!defined(__GLIBCXX__)) || __GLIBCXX__ > 20150626
#define EIGEN_HAS_TYPE_TRAITS 1
#define EIGEN_INCLUDE_TYPE_TRAITS
#else
@ -718,29 +687,14 @@
#endif
#endif
// Does the compiler support variadic templates?
#ifndef EIGEN_HAS_VARIADIC_TEMPLATES
#if (!defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (EIGEN_COMP_NVCC >= 80000) )
// ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices:
// this prevents nvcc from crashing when compiling Eigen on Tegra X1
#define EIGEN_HAS_VARIADIC_TEMPLATES 1
#elif defined(SYCL_DEVICE_ONLY)
#define EIGEN_HAS_VARIADIC_TEMPLATES 1
#else
#define EIGEN_HAS_VARIADIC_TEMPLATES 0
#endif
#endif
// Does the compiler fully support const expressions? (as in c++14)
#ifndef EIGEN_HAS_CONSTEXPR
#if defined(EIGEN_CUDACC)
// Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above
#if EIGEN_MAX_CPP_VER>=14 && (EIGEN_COMP_CLANG || EIGEN_COMP_NVCC >= 70500)
#if (EIGEN_COMP_CLANG || EIGEN_COMP_NVCC >= 70500)
#define EIGEN_HAS_CONSTEXPR 1
#endif
#elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (EIGEN_COMP_CXXVER >= 14) || \
(EIGEN_GNUC_AT_LEAST(4,8) && (EIGEN_COMP_CXXVER >= 11)) || \
(EIGEN_COMP_CLANG >= 306 && (EIGEN_COMP_CXXVER >= 11)))
#else
#define EIGEN_HAS_CONSTEXPR 1
#endif
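The "relaxed" C++14 rules now assumed on host compilers allow loops and local mutation inside constexpr functions, which C++11 rejected; a small sketch:
constexpr int demo_factorial(int n) {
  int r = 1;
  for (int i = 2; i <= n; ++i) r *= i;   // ill-formed in a C++11 constexpr function
  return r;
}
static_assert(demo_factorial(5) == 120, "evaluated at compile time");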
@ -759,8 +713,7 @@
// Does the compiler support C++11 math?
// Let's be conservative and enable the default C++11 implementation only if we are sure it exists
#ifndef EIGEN_HAS_CXX11_MATH
#if ((EIGEN_COMP_CXXVER > 11) || (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC) \
&& (EIGEN_ARCH_i386_OR_x86_64) && (EIGEN_OS_GNULINUX || EIGEN_OS_WIN_STRICT || EIGEN_OS_MAC))
#if (EIGEN_ARCH_i386_OR_x86_64 && (EIGEN_OS_GNULINUX || EIGEN_OS_WIN_STRICT || EIGEN_OS_MAC))
#define EIGEN_HAS_CXX11_MATH 1
#else
#define EIGEN_HAS_CXX11_MATH 0
@ -844,15 +797,11 @@
#endif
#endif
// EIGEN_ALWAYS_INLINE is the stronget, it has the effect of making the function inline and adding every possible
// EIGEN_ALWAYS_INLINE is the strongest, it has the effect of making the function inline and adding every possible
// attribute to maximize inlining. This should only be used when really necessary: in particular,
// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
// FIXME with the always_inline attribute,
// gcc 3.4.x and 4.1 reports the following compilation error:
// Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
// : function body not available
// See also bug 1367
#if EIGEN_GNUC_AT_LEAST(4,2) && !defined(SYCL_DEVICE_ONLY)
#if EIGEN_COMP_GNUC && !defined(SYCL_DEVICE_ONLY)
#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
#else
#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
@ -1094,8 +1043,8 @@ namespace Eigen {
#define EIGEN_USING_STD(FUNC) using std::FUNC;
#endif
#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || EIGEN_COMP_NVCC)
// For older MSVC versions, as well as when compiling with NVCC, using the base operator is necessary,
#if EIGEN_COMP_MSVC_STRICT && EIGEN_COMP_NVCC
// When compiling with NVCC, using the base operator is necessary,
// otherwise we get duplicate definition errors
// For later MSVC versions, we require explicit operator= definition, otherwise we get
// use of implicitly deleted operator errors.
@ -1251,16 +1200,9 @@ namespace Eigen {
CwiseBinaryOp<EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)<SCALAR,typename internal::traits<EXPR>::Scalar>, \
const typename internal::plain_constant_type<EXPR,SCALAR>::type, const EXPR>
// Workaround for MSVC 2010 (see ML thread "patch with compile for for MSVC 2010")
#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC_STRICT<=1600)
#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) typename internal::enable_if<true,X>::type
#else
#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) X
#endif
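The removed wrapper was an identity alias — enable_if<true, X>::type is exactly X — that merely helped MSVC 2010 parse the long return types; a self-contained sketch (demo names) of why it is a no-op on conforming compilers:
#include <type_traits>
template <bool, typename T> struct demo_enable_if {};
template <typename T> struct demo_enable_if<true, T> { typedef T type; };
static_assert(std::is_same<demo_enable_if<true, int>::type, int>::value,
              "the wrapper adds nothing but a parse barrier");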
#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) \
template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type,OPNAME))\
const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type,OPNAME)\
(METHOD)(const T& scalar) const { \
typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type PromotedT; \
return EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,PromotedT,OPNAME)(derived(), \
@ -1269,7 +1211,7 @@ namespace Eigen {
#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \
template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend \
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type,Derived,OPNAME)) \
const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type,Derived,OPNAME) \
(METHOD)(const T& scalar, const StorageBaseType& matrix) { \
typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type PromotedT; \
return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedT,Derived,OPNAME)( \
@ -1314,7 +1256,6 @@ namespace Eigen {
#define EIGEN_EXCEPTION_SPEC(X) noexcept(false)
#if EIGEN_HAS_VARIADIC_TEMPLATES
// The all function is used to enable a variadic version of eigen_assert which can take a parameter pack as its input.
namespace Eigen {
namespace internal {
@ -1326,7 +1267,6 @@ bool all(T t, Ts ... ts){ return t && all(ts...); }
}
}
#endif
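Usage note, as a sketch assuming an Eigen header providing the helper is included: all(t, ts...) folds its arguments with && and short-circuits left to right, so a variadic assert reads
eigen_assert(Eigen::internal::all(rows >= 0, cols >= 0, rows == cols));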
// provide override and final specifiers if they are available:
#define EIGEN_OVERRIDE override

View File

@ -936,7 +936,7 @@ public:
__asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
# endif
# elif EIGEN_COMP_MSVC
# if (EIGEN_COMP_MSVC > 1500) && EIGEN_ARCH_i386_OR_x86_64
# if EIGEN_ARCH_i386_OR_x86_64
# define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
# endif
# endif

View File

@ -371,7 +371,7 @@ struct invoke_result {
#endif
// C++14 integer/index_sequence.
#if defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304L && EIGEN_MAX_CPP_VER >= 14
#if defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304L
using std::integer_sequence;
using std::make_integer_sequence;

View File

@ -92,10 +92,8 @@ public:
template<typename T>
Index eval(const T& values) const { return derived().eval_impl(values); }
#if EIGEN_HAS_CXX14
template<typename... Types>
Index eval(Types&&... values) const { return derived().eval_impl(std::make_tuple(values...)); }
#endif
NegateExpr<Derived> operator-() const { return NegateExpr<Derived>(derived()); }
@ -143,34 +141,6 @@ public:
friend QuotientExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator/(internal::FixedInt<N>, const BaseExpr& b)
{ return QuotientExpr<ValueExpr<internal::FixedInt<N> > ,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }
#if (!EIGEN_HAS_CXX14)
template<int N>
AddExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator+(internal::FixedInt<N> (*)()) const
{ return AddExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(), ValueExpr<internal::FixedInt<N> >()); }
template<int N>
AddExpr<Derived,ValueExpr<internal::FixedInt<-N> > > operator-(internal::FixedInt<N> (*)()) const
{ return AddExpr<Derived,ValueExpr<internal::FixedInt<-N> > >(derived(), ValueExpr<internal::FixedInt<-N> >()); }
template<int N>
ProductExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator*(internal::FixedInt<N> (*)()) const
{ return ProductExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(),ValueExpr<internal::FixedInt<N> >()); }
template<int N>
QuotientExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator/(internal::FixedInt<N> (*)()) const
{ return QuotientExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(),ValueExpr<internal::FixedInt<N> >()); }
template<int N>
friend AddExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator+(internal::FixedInt<N> (*)(), const BaseExpr& b)
{ return AddExpr<Derived,ValueExpr<internal::FixedInt<N> > >(b.derived(), ValueExpr<internal::FixedInt<N> >()); }
template<int N>
friend AddExpr<NegateExpr<Derived>,ValueExpr<internal::FixedInt<N> > > operator-(internal::FixedInt<N> (*)(), const BaseExpr& b)
{ return AddExpr<NegateExpr<Derived>,ValueExpr<internal::FixedInt<N> > >(-b.derived(), ValueExpr<internal::FixedInt<N> >()); }
template<int N>
friend ProductExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator*(internal::FixedInt<N> (*)(), const BaseExpr& b)
{ return ProductExpr<ValueExpr<internal::FixedInt<N> >,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }
template<int N>
friend QuotientExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator/(internal::FixedInt<N> (*)(), const BaseExpr& b)
{ return QuotientExpr<ValueExpr<internal::FixedInt<N> > ,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }
#endif
template<typename OtherDerived>
AddExpr<Derived,OtherDerived> operator+(const BaseExpr<OtherDerived> &b) const
@ -232,11 +202,9 @@ public:
Index eval_impl(const SymbolValue<Tag> &values) const { return values.value(); }
#if EIGEN_HAS_CXX14
// C++14 versions suitable for multiple symbols
template<typename... Types>
Index eval_impl(const std::tuple<Types...>& values) const { return std::get<SymbolValue<Tag> >(values).value(); }
#endif
};
template<typename Arg0>

View File

@ -288,10 +288,7 @@ class SparseMatrix
#else
template<class SizesType>
inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
#if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
typename
#endif
SizesType::value_type())
typename SizesType::value_type())
{
EIGEN_UNUSED_VARIABLE(enableif);
reserveInnerVectors(reserveSizes);

View File

@ -105,8 +105,6 @@ operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_IND
return Base::operator()(internal::eval_expr_given_size(rowIndices,rows()),internal::eval_expr_given_size(colIndices,cols()));
}
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
// The following three overloads are needed to handle raw Index[N] arrays.
template<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndices>
@ -133,7 +131,6 @@ operator()(const RowIndicesT (&rowIndices)[RowIndicesN], const ColIndicesT (&col
(derived(), rowIndices, colIndices);
}
#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE
// Overloads for 1D vectors/arrays
@ -178,8 +175,6 @@ operator()(const IndexType& id) EIGEN_INDEXED_VIEW_METHOD_CONST
return Base::operator()(internal::eval_expr_given_size(id,size()));
}
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
template<typename IndicesT, std::size_t IndicesN>
typename internal::enable_if<IsRowMajor,
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,const IndicesT (&)[IndicesN]> >::type
@ -200,8 +195,6 @@ operator()(const IndicesT (&indices)[IndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST
(derived(), indices, IvcIndex(0));
}
#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE
#undef EIGEN_INDEXED_VIEW_METHOD_CONST
#undef EIGEN_INDEXED_VIEW_METHOD_TYPE

View File

@ -55,23 +55,17 @@ By default, %Eigen strive to automatically detect and enable language features a
the information provided by the compiler.
- \b EIGEN_MAX_CPP_VER - disables usage of C++ features requiring a version greater than EIGEN_MAX_CPP_VER.
Possible values are: 11, 14, 17, etc. If not defined (the default), %Eigen enables all features supported
Possible values are: 14, 17, etc. If not defined (the default), %Eigen enables all features supported
by the compiler.
Individual features can be explicitly enabled or disabled by defining the following token to 0 or 1 respectively.
For instance, one might limit the C++ version to C++03 by defining EIGEN_MAX_CPP_VER=03, but still enable C99 math
For instance, one might limit the C++ version to C++14 by defining EIGEN_MAX_CPP_VER=14, but still enable C99 math
functions by defining EIGEN_HAS_C99_MATH=1.
- \b EIGEN_HAS_C99_MATH - controls the usage of C99 math functions such as erf, erfc, lgamma, etc.
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- \b EIGEN_HAS_CXX11_MATH - controls the implementation of some functions such as round, logp1, isinf, isnan, etc.
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- \b EIGEN_HAS_STD_RESULT_OF - defines whether std::result_of is supported
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- \b EIGEN_HAS_VARIADIC_TEMPLATES - defines whether variadic templates are supported
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- \b EIGEN_HAS_CONSTEXPR - defines whether relaxed const expressions are supported
Automatic detection disabled if EIGEN_MAX_CPP_VER<14.
- \b EIGEN_NO_IO - Disables any usage and support for `<iostreams>`.
\section TopicPreprocessorDirectivesAssertions Assertions

View File

@ -1,5 +1,3 @@
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
MatrixXi A = MatrixXi::Random(4,6);
cout << "Initial matrix A:\n" << A << "\n\n";
cout << "A(all,{4,2,5,5,3}):\n" << A(all,{4,2,5,5,3}) << "\n\n";
#endif

View File

@ -90,8 +90,6 @@ void dense_storage_swap(int rows0, int cols0, int rows1, int cols1)
template<typename T, int Size, std::size_t Alignment>
void dense_storage_alignment()
{
#if EIGEN_HAS_ALIGNAS
struct alignas(Alignment) Empty1 {};
VERIFY_IS_EQUAL(std::alignment_of<Empty1>::value, Alignment);
@ -109,8 +107,6 @@ void dense_storage_alignment()
VERIFY_IS_EQUAL( (std::alignment_of<Matrix<T,Size,1,AutoAlign> >::value), default_alignment);
struct Nested2 { Matrix<T,Size,1,AutoAlign> mat; };
VERIFY_IS_EQUAL(std::alignment_of<Nested2>::value, default_alignment);
#endif
}
template<typename T>

View File

@ -7,11 +7,6 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifdef EIGEN_TEST_PART_2
// Make sure we also check c++11 max implementation
#define EIGEN_MAX_CPP_VER 11
#endif
#include <valarray>
#include <vector>
#include "main.h"
@ -84,11 +79,7 @@ void check_indexed_view()
ArrayXd a = ArrayXd::LinSpaced(n,0,n-1);
Array<double,1,Dynamic> b = a.transpose();
#if EIGEN_COMP_CXXVER>=14
ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ref(encode));
#else
ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ptr_fun(&encode));
#endif
for(Index i=0; i<n; ++i)
for(Index j=0; j<n; ++j)
@ -299,7 +290,6 @@ void check_indexed_view()
VERIFY_IS_APPROX( (A(std::array<int,3>{{1,3,5}}, std::array<int,4>{{9,6,3,0}})), A(seqN(1,3,2), seqN(9,4,-3)) );
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
VERIFY_IS_APPROX( A({3, 1, 6, 5}, all), A(std::array<int,4>{{3, 1, 6, 5}}, all) );
VERIFY_IS_APPROX( A(all,{3, 1, 6, 5}), A(all,std::array<int,4>{{3, 1, 6, 5}}) );
VERIFY_IS_APPROX( A({1,3,5},{3, 1, 6, 5}), A(std::array<int,3>{{1,3,5}},std::array<int,4>{{3, 1, 6, 5}}) );
@ -312,7 +302,6 @@ void check_indexed_view()
VERIFY_IS_APPROX( b({3, 1, 6, 5}), b(std::array<int,4>{{3, 1, 6, 5}}) );
VERIFY_IS_EQUAL( b({1,3,5}).SizeAtCompileTime, 3 );
#endif
// check mat(i,j) with weird types for i and j
{
@ -438,7 +427,6 @@ EIGEN_DECLARE_TEST(indexed_view)
{
// for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( check_indexed_view() );
CALL_SUBTEST_2( check_indexed_view() );
// }
// static checks of some internals:

View File

@ -114,13 +114,7 @@ EIGEN_DECLARE_TEST(meta)
// So the following tests are expected to fail with recent compilers.
STATIC_CHECK(( !internal::is_convertible<MyInterface, MyImpl>::value ));
#if (!EIGEN_COMP_GNUC_STRICT) || (EIGEN_GNUC_AT_LEAST(4,8))
// GCC prior to 4.8 fails to compile this test:
// error: cannot allocate an object of abstract type 'MyInterface'
// In other words, it does not obey SFINAE.
// Nevertheless, we don't really care about supporting abstract type as scalar type!
STATIC_CHECK(( !internal::is_convertible<MyImpl, MyInterface>::value ));
#endif
STATIC_CHECK(( internal::is_convertible<MyImpl, const MyInterface&>::value ));
#endif

View File

@ -21,7 +21,7 @@
// Deal with i387 extended precision
#if EIGEN_ARCH_i386 && !(EIGEN_ARCH_x86_64)
#if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(4,4)
#if EIGEN_COMP_GNUC_STRICT
#pragma GCC optimize ("-ffloat-store")
#else
#undef VERIFY_IS_EQUAL

View File

@ -452,10 +452,8 @@ void test_stl_iterators(int rows=Rows, int cols=Cols)
using VecOp = VectorwiseOp<ArrayXXi, 0>;
STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::declval<const VecOp&>().cbegin())>::value ));
STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::declval<const VecOp&>().cend ())>::value ));
#if EIGEN_COMP_CXXVER>=14
STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::cbegin(std::declval<const VecOp&>()))>::value ));
STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::cend (std::declval<const VecOp&>()))>::value ));
#endif
STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::cbegin(std::declval<const VecOp&>()))>::value ));
STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::cend (std::declval<const VecOp&>()))>::value ));
}
}

View File

@ -113,7 +113,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
inline Self& base() { return *this; }
inline const Self& base() const { return *this; }
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
@ -121,7 +120,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
#endif
// normal indices
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
@ -153,7 +151,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
return m_storage.data()[index];
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
@ -161,7 +158,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
#endif
// normal indices
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
@ -193,7 +189,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
return m_storage.data()[index];
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
@ -201,28 +196,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
{
return coeff(array<Index, 2>(i0, i1));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
{
return coeff(array<Index, 3>(i0, i1, i2));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
{
return coeff(array<Index, 4>(i0, i1, i2, i3));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
{
return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
}
#endif
// custom indices
#ifdef EIGEN_HAS_SFINAE
@ -260,7 +233,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
return coeff(index);
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
@ -268,28 +240,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
{
return coeffRef(array<Index, 2>(i0, i1));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
{
return coeffRef(array<Index, 3>(i0, i1, i2));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
{
return coeffRef(array<Index, 4>(i0, i1, i2, i3));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
{
return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
}
#endif
// normal indices
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
@ -339,7 +289,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
{
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
: m_storage(firstDimension, otherDimensions...)
@ -347,33 +296,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#else
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1)
: m_storage(dim1, array<Index, 1>(dim1))
{
EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2)
: m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
{
EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3)
: m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
{
EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
: m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
{
EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
: m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
{
EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#endif
/** Normal Dimension */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
@ -434,7 +356,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
return *this;
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
void resize(Index firstDimension, IndexTypes... otherDimensions)
{
@ -442,7 +363,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
}
#endif
/** Normal Dimension */
EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
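With variadic templates now assumed, the surviving forms cover every rank; a usage sketch of what the deleted fixed-arity overloads duplicated:
Eigen::Tensor<float, 3> t(2, 3, 4);   // variadic constructor, rank checked statically
t(1, 2, 3) = 1.0f;                    // variadic operator()
t.resize(4, 3, 2);                    // variadic resize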

View File

@ -1012,7 +1012,6 @@ class TensorBase : public TensorBase<Derived, ReadOnlyAccessors> {
return derived() = this->template random<RandomGenerator>();
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& setValues(
const typename internal::Initializer<Derived, NumDimensions>::InitList& vals) {
@ -1020,7 +1019,6 @@ class TensorBase : public TensorBase<Derived, ReadOnlyAccessors> {
internal::initialize_tensor<Derived, NumDimensions>(eval, vals);
return derived();
}
#endif // EIGEN_HAS_VARIADIC_TEMPLATES
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Derived& operator+=(const OtherDerived& other) {
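setValues is now compiled unconditionally; a usage sketch:
Eigen::Tensor<int, 2> demo(2, 2);
demo.setValues({{1, 2}, {3, 4}});     // one nesting level of braces per rank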

View File

@ -111,12 +111,10 @@ struct Sizes {
explicit EIGEN_DEVICE_FUNC Sizes(const array<DenseIndex, Base::count>& /*indices*/) {
// todo: add assertion
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template <typename... DenseIndex> EIGEN_DEVICE_FUNC Sizes(DenseIndex...) { }
explicit EIGEN_DEVICE_FUNC Sizes(std::initializer_list<std::ptrdiff_t> /*l*/) {
// todo: add assertion
}
#endif
template <typename T> Sizes& operator = (const T& /*other*/) {
// add assertion failure if the size of other is different
@ -173,17 +171,17 @@ template <std::ptrdiff_t V1=0, std::ptrdiff_t V2=0, std::ptrdiff_t V3=0, std::pt
explicit Sizes(const array<DenseIndex, Base::count>& /*indices*/) {
// todo: add assertion
}
template <typename T> Sizes& operator = (const T& /*other*/) {
// add assertion failure if the size of other is different
return *this;
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template <typename... DenseIndex> Sizes(DenseIndex... /*indices*/) { }
explicit Sizes(std::initializer_list<std::ptrdiff_t>) {
// todo: add assertion
}
#else
EIGEN_DEVICE_FUNC explicit Sizes(const DenseIndex) {
}
EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex) {
@ -337,39 +335,10 @@ struct DSizes : array<DenseIndex, NumDims> {
}
#endif
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) {
EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 2 == NumDims, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#else
EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1) {
eigen_assert(NumDims == 2);
(*this)[0] = i0;
(*this)[1] = i1;
}
EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2) {
eigen_assert(NumDims == 3);
(*this)[0] = i0;
(*this)[1] = i1;
(*this)[2] = i2;
}
EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3) {
eigen_assert(NumDims == 4);
(*this)[0] = i0;
(*this)[1] = i1;
(*this)[2] = i2;
(*this)[3] = i3;
}
EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3, const DenseIndex i4) {
eigen_assert(NumDims == 5);
(*this)[0] = i0;
(*this)[1] = i1;
(*this)[2] = i2;
(*this)[3] = i3;
(*this)[4] = i4;
}
#endif
EIGEN_DEVICE_FUNC DSizes& operator = (const array<DenseIndex, NumDims>& other) {
*static_cast<Base*>(this) = other;

View File

@ -74,7 +74,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
inline Self& base() { return *this; }
inline const Self& base() const { return *this; }
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
{
@ -82,7 +81,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
#endif
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
@ -106,7 +104,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
{
@ -114,7 +111,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
#endif
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
@ -137,7 +133,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
return m_storage.data()[0];
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
{
@ -145,53 +140,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
{
if (Options&RowMajor) {
const Index index = i1 + i0 * m_storage.dimensions()[1];
return m_storage.data()[index];
} else {
const Index index = i0 + i1 * m_storage.dimensions()[0];
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
{
if (Options&RowMajor) {
const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
{
if (Options&RowMajor) {
const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
{
if (Options&RowMajor) {
const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
return m_storage.data()[index];
}
}
#endif
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
@ -222,7 +170,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
return coeff(index);
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
{
@ -230,52 +177,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
{
if (Options&RowMajor) {
const Index index = i1 + i0 * m_storage.dimensions()[1];
return m_storage.data()[index];
} else {
const Index index = i0 + i1 * m_storage.dimensions()[0];
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
{
if (Options&RowMajor) {
const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
{
if (Options&RowMajor) {
const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
{
if (Options&RowMajor) {
const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
return m_storage.data()[index];
}
}
#endif
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)

View File

@ -12,7 +12,7 @@
#include "./InternalHeaderCheck.h"
#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES
#if EIGEN_HAS_CONSTEXPR
#define EIGEN_HAS_INDEX_LIST

View File

@ -10,8 +10,6 @@
#ifndef EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
#define EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
#if EIGEN_HAS_VARIADIC_TEMPLATES
#include <initializer_list>
#include "./InternalHeaderCheck.h"
@ -79,6 +77,4 @@ void initialize_tensor(TensorEvaluator<Derived, DefaultDevice>& tensor,
} // namespace internal
} // namespace Eigen
#endif // EIGEN_HAS_VARIADIC_TEMPLATES
#endif // EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H

View File

@ -28,15 +28,10 @@
// SFINAE requires variadic templates
#if !defined(EIGEN_GPUCC)
#if EIGEN_HAS_VARIADIC_TEMPLATES
// SFINAE doesn't work for gcc <= 4.7
#ifdef EIGEN_COMP_GNUC
#if EIGEN_GNUC_AT_LEAST(4,8)
#define EIGEN_HAS_SFINAE
#endif
#else
#define EIGEN_HAS_SFINAE
#endif
#ifdef EIGEN_COMP_GNUC
#define EIGEN_HAS_SFINAE
#else
#define EIGEN_HAS_SFINAE
#endif
#endif

View File

@ -84,35 +84,11 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
EIGEN_STATIC_ASSERT((0 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions(firstDimension, otherDimensions...) {
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT((sizeof...(otherDimensions) + 1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions(firstDimension) {
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT((1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2) : m_data(dataPtr), m_dimensions(dim1, dim2) {
EIGEN_STATIC_ASSERT(2 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3) {
EIGEN_STATIC_ASSERT(3 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4) {
EIGEN_STATIC_ASSERT(4 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4, dim5) {
EIGEN_STATIC_ASSERT(5 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#endif
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, const array<Index, NumIndices>& dimensions)
: m_data(dataPtr), m_dimensions(dimensions)
@ -167,7 +143,6 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
return m_data[index];
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
@ -181,52 +156,6 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
return m_data[index];
}
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1) const
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i1 + i0 * m_dimensions[1];
return m_data[index];
} else {
const Index index = i0 + i1 * m_dimensions[0];
return m_data[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2) const
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
return m_data[index];
} else {
const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2);
return m_data[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3) const
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0));
return m_data[index];
} else {
const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3));
return m_data[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)));
return m_data[index];
} else {
const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4)));
return m_data[index];
}
}
#endif
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(const array<Index, NumIndices>& indices)
@ -255,7 +184,6 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
return m_data[index];
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
@ -270,52 +198,6 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
return m_data[index];
}
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1)
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i1 + i0 * m_dimensions[1];
return m_data[index];
} else {
const Index index = i0 + i1 * m_dimensions[0];
return m_data[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2)
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
return m_data[index];
} else {
const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2);
return m_data[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3)
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0));
return m_data[index];
} else {
const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3));
return m_data[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)));
return m_data[index];
} else {
const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4)));
return m_data[index];
}
}
#endif
EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorMap)

View File

@ -108,7 +108,7 @@ struct preserve_inner_most_dims {
static const bool value = false;
};
#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES
#if EIGEN_HAS_CONSTEXPR
template <typename ReducedDims, int NumTensorDims>
struct are_inner_most_dims<ReducedDims, NumTensorDims, ColMajor>{
static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();

View File

@ -206,7 +206,6 @@ template<typename PlainObjectType> class TensorRef : public TensorBase<TensorRef
return m_evaluator->coeff(index);
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar operator()(Index firstIndex, IndexTypes... otherIndices) const
{
@ -221,85 +220,6 @@ template<typename PlainObjectType> class TensorRef : public TensorBase<TensorRef
const array<Index, num_indices> indices{{firstIndex, otherIndices...}};
return coeffRef(indices);
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1) const
{
array<Index, 2> indices;
indices[0] = i0;
indices[1] = i1;
return coeff(indices);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2) const
{
array<Index, 3> indices;
indices[0] = i0;
indices[1] = i1;
indices[2] = i2;
return coeff(indices);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3) const
{
array<Index, 4> indices;
indices[0] = i0;
indices[1] = i1;
indices[2] = i2;
indices[3] = i3;
return coeff(indices);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
{
array<Index, 5> indices;
indices[0] = i0;
indices[1] = i1;
indices[2] = i2;
indices[3] = i3;
indices[4] = i4;
return coeff(indices);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1)
{
array<Index, 2> indices;
indices[0] = i0;
indices[1] = i1;
return coeffRef(indices);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2)
{
array<Index, 3> indices;
indices[0] = i0;
indices[1] = i1;
indices[2] = i2;
return coeffRef(indices);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
{
array<Index, 4> indices;
indices[0] = i0;
indices[1] = i1;
indices[2] = i2;
indices[3] = i3;
return coeffRef(indices);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2, Index i3, Index i4)
{
array<Index, 5> indices;
indices[0] = i0;
indices[1] = i1;
indices[2] = i2;
indices[3] = i3;
indices[4] = i4;
return coeffRef(indices);
}
#endif
template <std::size_t NumIndices> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar coeff(const array<Index, NumIndices>& indices) const

View File

@ -88,12 +88,10 @@ class TensorStorage<T, DSizes<IndexType, NumIndices_>, Options_>
: m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(size)), m_dimensions(dimensions)
{ EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN }
#if EIGEN_HAS_VARIADIC_TEMPLATES
template <typename... DenseIndex>
EIGEN_DEVICE_FUNC TensorStorage(DenseIndex... indices) : m_dimensions(indices...) {
m_data = internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(internal::array_prod(m_dimensions));
}
#endif
EIGEN_DEVICE_FUNC TensorStorage(const Self& other)
: m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(internal::array_prod(other.m_dimensions)))

View File

@ -18,9 +18,7 @@
#else
#if ((EIGEN_COMP_GNUC && EIGEN_GNUC_AT_LEAST(4, 8)) || \
__has_feature(cxx_thread_local) || \
(EIGEN_COMP_MSVC >= 1900) )
#if ((EIGEN_COMP_GNUC) || __has_feature(cxx_thread_local) || EIGEN_COMP_MSVC )
#define EIGEN_THREAD_LOCAL static thread_local
#endif

View File

@ -10,10 +10,8 @@
#ifndef EIGEN_EMULATE_ARRAY_H
#define EIGEN_EMULATE_ARRAY_H
// The array class is only available starting with cxx11. Emulate our own here
// if needed. Beware, msvc still doesn't advertise itself as a c++11 compiler!
// Moreover, CUDA doesn't support the STL containers, so we use our own instead.
#if (__cplusplus <= 199711L && EIGEN_COMP_MSVC < 1900) || defined(EIGEN_GPUCC) || defined(EIGEN_AVOID_STL_ARRAY)
// CUDA doesn't support the STL containers, so we use our own instead.
#if defined(EIGEN_GPUCC) || defined(EIGEN_AVOID_STL_ARRAY)
namespace Eigen {
template <typename T, size_t n> class array {
@ -152,13 +150,11 @@ template <typename T, size_t n> class array {
values[7] = v8;
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(std::initializer_list<T> l) {
eigen_assert(l.size() == n);
internal::smart_copy(l.begin(), l.end(), values);
}
#endif
};
@ -202,12 +198,10 @@ template <typename T> class array<T, 0> {
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array() : dummy() { }
#if EIGEN_HAS_VARIADIC_TEMPLATES
EIGEN_DEVICE_FUNC array(std::initializer_list<T> l) : dummy() {
EIGEN_UNUSED_VARIABLE(l);
eigen_assert(l.size() == 0);
}
#endif
private:
T dummy;

View File

@ -22,17 +22,8 @@ public:
AutoDiffJacobian(const Functor& f) : Functor(f) {}
// forward constructors
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... T>
AutoDiffJacobian(const T& ...Values) : Functor(Values...) {}
#else
template<typename T0>
AutoDiffJacobian(const T0& a0) : Functor(a0) {}
template<typename T0, typename T1>
AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {}
template<typename T0, typename T1, typename T2>
AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {}
#endif
typedef typename Functor::InputType InputType;
typedef typename Functor::ValueType ValueType;
@ -52,7 +43,6 @@ public:
typedef Matrix<ActiveScalar, InputsAtCompileTime, 1> ActiveInput;
typedef Matrix<ActiveScalar, ValuesAtCompileTime, 1> ActiveValue;
#if EIGEN_HAS_VARIADIC_TEMPLATES
// Some compilers don't accept variadic parameters after a default parameter,
// i.e., we can't just write _jac=0 but we need to overload operator():
EIGEN_STRONG_INLINE
@ -63,19 +53,12 @@ public:
template<typename... ParamsType>
void operator() (const InputType& x, ValueType* v, JacobianType* _jac,
const ParamsType&... Params) const
#else
void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const
#endif
{
eigen_assert(v!=0);
if (!_jac)
{
#if EIGEN_HAS_VARIADIC_TEMPLATES
Functor::operator()(x, v, Params...);
#else
Functor::operator()(x, v);
#endif
return;
}
@ -91,11 +74,7 @@ public:
for (Index i=0; i<jac.cols(); i++)
ax[i].derivatives() = DerivativeType::Unit(x.rows(),i);
#if EIGEN_HAS_VARIADIC_TEMPLATES
Functor::operator()(ax, &av, Params...);
#else
Functor::operator()(ax, &av);
#endif
for (Index i=0; i<jac.rows(); i++)
{

View File

@ -106,7 +106,6 @@ struct TestFunc1
};
#if EIGEN_HAS_VARIADIC_TEMPLATES
/* Test functor for the C++11 features. */
template <typename Scalar>
struct integratorFunctor
@ -186,7 +185,6 @@ template<typename Func> void forward_jacobian_cpp11(const Func& f)
VERIFY_IS_APPROX(y, yref);
VERIFY_IS_APPROX(j, jref);
}
#endif
template<typename Func> void forward_jacobian(const Func& f)
{
@ -247,9 +245,7 @@ void test_autodiff_jacobian()
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,2>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,3>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double>(3,3)) ));
#if EIGEN_HAS_VARIADIC_TEMPLATES
CALL_SUBTEST(( forward_jacobian_cpp11(integratorFunctor<double>(10)) ));
#endif
}

View File

@ -280,7 +280,6 @@ static void test_compound_assign()
}
static void test_std_initializers_tensor() {
#if EIGEN_HAS_VARIADIC_TEMPLATES
Tensor<int, 1> a(3);
a.setValues({0, 1, 2});
VERIFY_IS_EQUAL(a(0), 0);
@ -349,7 +348,6 @@ static void test_std_initializers_tensor() {
VERIFY_IS_EQUAL(c(2, 1, 1), 25);
VERIFY_IS_EQUAL(c(2, 1, 2), 26);
VERIFY_IS_EQUAL(c(2, 1, 3), 27);
#endif // EIGEN_HAS_VARIADIC_TEMPLATES
}
EIGEN_DECLARE_TEST(cxx11_tensor_assign)

View File

@ -91,15 +91,7 @@ static void test_vectorized_broadcasting()
}
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
tensor.resize(11,3,5);
#else
array<Index, 3> new_dims;
new_dims[0] = 11;
new_dims[1] = 3;
new_dims[2] = 5;
tensor.resize(new_dims);
#endif
tensor.setRandom();
broadcast = tensor.broadcast(broadcasts);
@ -148,15 +140,7 @@ static void test_static_broadcasting()
}
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
tensor.resize(11,3,5);
#else
array<Index, 3> new_dims;
new_dims[0] = 11;
new_dims[1] = 3;
new_dims[2] = 5;
tensor.resize(new_dims);
#endif
tensor.setRandom();
broadcast = tensor.broadcast(broadcasts);