From 3ed67cb0bb4af65fbf243df598604a8c7630bf7d Mon Sep 17 00:00:00 2001
From: Rasmus Munk Larsen
Date: Tue, 4 Oct 2016 14:22:56 -0700
Subject: [PATCH 01/15] Fix a bug in the implementation of Carmack's fast sqrt
 algorithm in Eigen (enabled by EIGEN_FAST_MATH), which causes the vectorized
 parts of the computation to return -0.0 instead of NaN for negative
 arguments.

Benchmark speed in Giga-sqrts/s
Intel(R) Xeon(R) CPU E5-1650 v3 @ 3.50GHz
-----------------------------------------
              SSE       AVX
Fast=1        2.529G    4.380G
Fast=0        1.944G    1.898G
Fast=1 fixed  2.214G    3.739G

This table illustrates the worst case in terms of speed impact: it was
measured by repeatedly computing the sqrt of an n=4096 float vector that
fits in L1 cache. For large vectors the operation becomes memory bound and
the differences between the different versions are almost negligible.
---
 Eigen/src/Core/arch/AVX/MathFunctions.h | 24 +++++++------------
 Eigen/src/Core/arch/SSE/MathFunctions.h | 19 ++++++++-------
 test/packetmath.cpp                     | 31 +++++++++++--------------
 3 files changed, 34 insertions(+), 40 deletions(-)

diff --git a/Eigen/src/Core/arch/AVX/MathFunctions.h b/Eigen/src/Core/arch/AVX/MathFunctions.h
index d21ec39dd..da70b6636 100644
--- a/Eigen/src/Core/arch/AVX/MathFunctions.h
+++ b/Eigen/src/Core/arch/AVX/MathFunctions.h
@@ -362,23 +362,17 @@ pexp(const Packet4d& _x) {
 template <>
 EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
 Packet8f psqrt(const Packet8f& _x) {
-  _EIGEN_DECLARE_CONST_Packet8f(one_point_five, 1.5f);
-  _EIGEN_DECLARE_CONST_Packet8f(minus_half, -0.5f);
-  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(flt_min, 0x00800000);
-
-  Packet8f neg_half = pmul(_x, p8f_minus_half);
-
-  // select only the inverse sqrt of positive normal inputs (denormals are
-  // flushed to zero and cause infs as well).
-  Packet8f non_zero_mask = _mm256_cmp_ps(_x, p8f_flt_min, _CMP_GE_OQ);
-  Packet8f x = _mm256_and_ps(non_zero_mask, _mm256_rsqrt_ps(_x));
+  Packet8f half = pmul(_x, pset1<Packet8f>(.5f));
+  Packet8f denormal_mask = _mm256_and_ps(
+      _mm256_cmpge_ps(_x, _mm256_setzero_ps()),
+      _mm256_cmplt_ps(_x, pset1<Packet8f>((std::numeric_limits<float>::min)())));
+  // Compute approximate reciprocal sqrt.
+  Packet8f x = _mm256_rsqrt_ps(_x);
   // Do a single step of Newton's iteration.
-  x = pmul(x, pmadd(neg_half, pmul(x, x), p8f_one_point_five));
-
-  // Multiply the original _x by it's reciprocal square root to extract the
-  // square root.
-  return pmul(_x, x);
+  x = pmul(x, psub(pset1<Packet8f>(1.5f), pmul(half, pmul(x,x))));
+  // Flush results for denormals to zero.
+ return _mm256_andnot_ps(denormal_mask, pmul(_x,x)); } #else template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED diff --git a/Eigen/src/Core/arch/SSE/MathFunctions.h b/Eigen/src/Core/arch/SSE/MathFunctions.h index ac2fd8103..2c34a869d 100644 --- a/Eigen/src/Core/arch/SSE/MathFunctions.h +++ b/Eigen/src/Core/arch/SSE/MathFunctions.h @@ -32,7 +32,7 @@ Packet4f plog(const Packet4f& _x) /* the smallest non denormalized float number */ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000); _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000);//-1.f/0.f); - + /* natural logarithm computed for 4 simultaneous float return NaN for x <= 0 */ @@ -451,18 +451,21 @@ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt(const Packet4f& _x) { Packet4f half = pmul(_x, pset1(.5f)); + Packet4f denormal_mask = _mm_and_ps( + _mm_cmpge_ps(_x, _mm_setzero_ps()), + _mm_cmplt_ps(_x, pset1((std::numeric_limits::min)()))); - /* select only the inverse sqrt of non-zero inputs */ - Packet4f non_zero_mask = _mm_cmpge_ps(_x, pset1((std::numeric_limits::min)())); - Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x)); - + // Compute approximate reciprocal sqrt. + Packet4f x = _mm_rsqrt_ps(_x); + // Do a single step of Newton's iteration. x = pmul(x, psub(pset1(1.5f), pmul(half, pmul(x,x)))); - return pmul(_x,x); + // Flush results for denormals to zero. + return _mm_andnot_ps(denormal_mask, pmul(_x,x)); } #else -template<>EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +template<>EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt(const Packet4f& x) { return _mm_sqrt_ps(x); } #endif @@ -491,7 +494,7 @@ Packet4f prsqrt(const Packet4f& _x) { Packet4f neg_mask = _mm_cmplt_ps(_x, _mm_setzero_ps()); Packet4f zero_mask = _mm_andnot_ps(neg_mask, le_zero_mask); Packet4f infs_and_nans = _mm_or_ps(_mm_and_ps(neg_mask, p4f_nan), - _mm_and_ps(zero_mask, p4f_inf)); + _mm_and_ps(zero_mask, p4f_inf)); // Do a single step of Newton's iteration. 
x = pmul(x, pmadd(neg_half, pmul(x, x), p4f_one_point_five)); diff --git a/test/packetmath.cpp b/test/packetmath.cpp index 1394d9f2b..c3d3e1521 100644 --- a/test/packetmath.cpp +++ b/test/packetmath.cpp @@ -193,7 +193,7 @@ template void packetmath() internal::pstore(data2+3*PacketSize, A3); VERIFY(areApprox(ref, data2, 4*PacketSize) && "internal::pbroadcast4"); } - + { for (int i=0; i void packetmath() internal::pstore(data2+1*PacketSize, A1); VERIFY(areApprox(ref, data2, 2*PacketSize) && "internal::pbroadcast2"); } - + VERIFY(internal::isApprox(data1[0], internal::pfirst(internal::pload(data1))) && "internal::pfirst"); - + if(PacketSize>1) { for(int offset=0;offset<4;++offset) @@ -315,7 +315,7 @@ template void packetmath_real() CHECK_CWISE1_IF(PacketTraits::HasRound, numext::round, internal::pround); CHECK_CWISE1_IF(PacketTraits::HasCeil, numext::ceil, internal::pceil); CHECK_CWISE1_IF(PacketTraits::HasFloor, numext::floor, internal::pfloor); - + for (int i=0; i(-1,1); @@ -440,12 +440,9 @@ template void packetmath_real() data1[0] = Scalar(-1.0f); h.store(data2, internal::plog(h.load(data1))); VERIFY((numext::isnan)(data2[0])); -#if !EIGEN_FAST_MATH h.store(data2, internal::psqrt(h.load(data1))); VERIFY((numext::isnan)(data2[0])); VERIFY((numext::isnan)(data2[1])); -#endif - } } @@ -459,7 +456,7 @@ template void packetmath_notcomplex() EIGEN_ALIGN_MAX Scalar data1[PacketTraits::size*4]; EIGEN_ALIGN_MAX Scalar data2[PacketTraits::size*4]; EIGEN_ALIGN_MAX Scalar ref[PacketTraits::size*4]; - + Array::Map(data1, PacketTraits::size*4).setRandom(); ref[0] = data1[0]; @@ -478,7 +475,7 @@ template void packetmath_notcomplex() for (int i=0; i(data1))) && "internal::predux_max"); - + for (int i=0; i(data1[0])); @@ -490,12 +487,12 @@ template void test_conj_helper(Scalar typedef internal::packet_traits PacketTraits; typedef typename PacketTraits::type Packet; const int PacketSize = PacketTraits::size; - + internal::conj_if cj0; internal::conj_if cj1; internal::conj_helper cj; internal::conj_helper pcj; - + for(int i=0;i void test_conj_helper(Scalar } internal::pstore(pval,pcj.pmul(internal::pload(data1),internal::pload(data2))); VERIFY(areApprox(ref, pval, PacketSize) && "conj_helper pmul"); - + for(int i=0;i void packetmath_complex() data1[i] = internal::random() * Scalar(1e2); data2[i] = internal::random() * Scalar(1e2); } - + test_conj_helper (data1,data2,ref,pval); test_conj_helper (data1,data2,ref,pval); test_conj_helper (data1,data2,ref,pval); test_conj_helper (data1,data2,ref,pval); - + { for(int i=0;i void packetmath_scatter_gather() for (int i=0; i()/RealScalar(PacketSize); } - + int stride = internal::random(1,20); - + EIGEN_ALIGN_MAX Scalar buffer[PacketSize*20]; memset(buffer, 0, 20*sizeof(Packet)); Packet packet = internal::pload(data1); @@ -594,7 +591,7 @@ void test_packetmath() CALL_SUBTEST_1( packetmath_notcomplex() ); CALL_SUBTEST_2( packetmath_notcomplex() ); CALL_SUBTEST_3( packetmath_notcomplex() ); - + CALL_SUBTEST_1( packetmath_real() ); CALL_SUBTEST_2( packetmath_real() ); From 765615609d40f485c718d60b76b35a277c36d5ce Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Tue, 4 Oct 2016 15:08:41 -0700 Subject: [PATCH 02/15] Update comment for fast sqrt. 
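
The comment below documents the EIGEN_FAST_MATH code path. For reference, here
is a rough scalar model of that path (not part of the patch; rsqrt_estimate()
is a hypothetical stand-in for the hardware _mm_rsqrt_ps approximation):

    #include <cmath>
    #include <limits>

    // Scalar sketch of the fast-math psqrt: approximate 1/sqrt(x), refine it
    // with one Newton step, then multiply by x to recover sqrt(x).
    inline float rsqrt_estimate(float x) { return 1.0f / std::sqrt(x); }

    inline float fast_sqrt(float x) {
      float half = 0.5f * x;
      // +0 and denormals must be flushed to zero: rsqrt returns +inf for them,
      // and x * inf would otherwise produce a spurious NaN.
      bool flush_to_zero = (x >= 0.0f) && (x < (std::numeric_limits<float>::min)());
      float y = rsqrt_estimate(x);      // approximate reciprocal sqrt
      y = y * (1.5f - half * y * y);    // one Newton-Raphson refinement step
      float result = x * y;             // sqrt(x) = x * (1/sqrt(x))
      // Negative inputs keep the NaN produced by rsqrt_estimate; +inf is not
      // handled, matching the limitation stated in the comment.
      return flush_to_zero ? 0.0f : result;
    }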
--- Eigen/src/Core/arch/SSE/MathFunctions.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Eigen/src/Core/arch/SSE/MathFunctions.h b/Eigen/src/Core/arch/SSE/MathFunctions.h index 2c34a869d..7b5f948e1 100644 --- a/Eigen/src/Core/arch/SSE/MathFunctions.h +++ b/Eigen/src/Core/arch/SSE/MathFunctions.h @@ -444,9 +444,14 @@ Packet4f pcos(const Packet4f& _x) #if EIGEN_FAST_MATH -// This is based on Quake3's fast inverse square root. +// Functions for sqrt. +// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step +// of Newton's method, at a cost of 1-2 bits of precision as opposed to the +// exact solution. It does not handle +inf, or denormalized numbers correctly. +// The main advantage of this approach is not just speed, but also the fact that +// it can be inlined and pipelined with other computations, further reducing its +// effective latency. This is similar to Quake3's fast inverse square root. // For detail see here: http://www.beyond3d.com/content/articles/8/ -// It lacks 1 (or 2 bits in some rare cases) of precision, and does not handle negative, +inf, or denormalized numbers correctly. template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt(const Packet4f& _x) { From 7f67e6dfdb635ed1e744c0794bb1f3dd4306678f Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Tue, 4 Oct 2016 15:09:11 -0700 Subject: [PATCH 03/15] Update comment for fast sqrt. --- Eigen/src/Core/arch/AVX/MathFunctions.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Eigen/src/Core/arch/AVX/MathFunctions.h b/Eigen/src/Core/arch/AVX/MathFunctions.h index da70b6636..25cd992ef 100644 --- a/Eigen/src/Core/arch/AVX/MathFunctions.h +++ b/Eigen/src/Core/arch/AVX/MathFunctions.h @@ -355,9 +355,11 @@ pexp(const Packet4d& _x) { // Functions for sqrt. // The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step // of Newton's method, at a cost of 1-2 bits of precision as opposed to the -// exact solution. The main advantage of this approach is not just speed, but -// also the fact that it can be inlined and pipelined with other computations, -// further reducing its effective latency. +// exact solution. It does not handle +inf, or denormalized numbers correctly. +// The main advantage of this approach is not just speed, but also the fact that +// it can be inlined and pipelined with other computations, further reducing its +// effective latency. This is similar to Quake3's fast inverse square root. +// For detail see here: http://www.beyond3d.com/content/articles/8/ #if EIGEN_FAST_MATH template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f From 80b513378948f78bc7729c431eb68ca5513a1d62 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Thu, 6 Oct 2016 09:55:50 +0200 Subject: [PATCH 04/15] Fix compilation of qr.inverse() for column and full pivoting variants. 
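
A minimal usage sketch (not from the patch itself) of the expression this
change makes compile again; the new test cases below exercise the same path:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
      if (qr.isInvertible()) {
        // qr.inverse() yields an Inverse<> expression; assigning it to a dense
        // matrix goes through the internal::Assignment specialization fixed here.
        Eigen::MatrixXd A_inv = qr.inverse();
        std::cout << (A * A_inv).isIdentity(1e-9) << "\n";
      }
      return 0;
    }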
--- Eigen/src/QR/ColPivHouseholderQR.h | 6 +++--- Eigen/src/QR/FullPivHouseholderQR.h | 6 +++--- test/qr_colpivoting.cpp | 12 ++++++++++++ test/qr_fullpivoting.cpp | 12 ++++++++++++ 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h index 9650781d6..618f80480 100644 --- a/Eigen/src/QR/ColPivHouseholderQR.h +++ b/Eigen/src/QR/ColPivHouseholderQR.h @@ -613,12 +613,12 @@ void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType & namespace internal { -template -struct Assignment >, internal::assign_op, Dense2Dense> +template +struct Assignment >, internal::assign_op::Scalar>, Dense2Dense> { typedef ColPivHouseholderQR QrType; typedef Inverse SrcXprType; - static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) { dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); } diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h index e0e15100d..e489bddc2 100644 --- a/Eigen/src/QR/FullPivHouseholderQR.h +++ b/Eigen/src/QR/FullPivHouseholderQR.h @@ -575,12 +575,12 @@ void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType namespace internal { -template -struct Assignment >, internal::assign_op, Dense2Dense> +template +struct Assignment >, internal::assign_op::Scalar>, Dense2Dense> { typedef FullPivHouseholderQR QrType; typedef Inverse SrcXprType; - static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) { dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); } diff --git a/test/qr_colpivoting.cpp b/test/qr_colpivoting.cpp index 057bb014c..26ed27f5c 100644 --- a/test/qr_colpivoting.cpp +++ b/test/qr_colpivoting.cpp @@ -141,6 +141,18 @@ template void qr() m2 = MatrixType::Random(cols,cols2); m2 = qr.solve(m3); VERIFY_IS_APPROX(m3, m1*m2); + + { + Index size = rows; + do { + m1 = MatrixType::Random(size,size); + qr.compute(m1); + } while(!qr.isInvertible()); + MatrixType m1_inv = qr.inverse(); + m3 = m1 * MatrixType::Random(size,cols2); + m2 = qr.solve(m3); + VERIFY_IS_APPROX(m2, m1_inv*m3); + } } template void qr_fixedsize() diff --git a/test/qr_fullpivoting.cpp b/test/qr_fullpivoting.cpp index 05a705887..70e89c198 100644 --- a/test/qr_fullpivoting.cpp +++ b/test/qr_fullpivoting.cpp @@ -54,6 +54,18 @@ template void qr() m2 = MatrixType::Random(cols,cols2); m2 = qr.solve(m3); VERIFY_IS_APPROX(m3, m1*m2); + + { + Index size = rows; + do { + m1 = MatrixType::Random(size,size); + qr.compute(m1); + } while(!qr.isInvertible()); + MatrixType m1_inv = qr.inverse(); + m3 = m1 * MatrixType::Random(size,cols2); + m2 = qr.solve(m3); + VERIFY_IS_APPROX(m2, m1_inv*m3); + } } template void qr_invertible() From d485d12c51bc46286f7439377e3ab591f67ddbbf Mon Sep 17 00:00:00 2001 From: Benoit Steiner Date: Thu, 6 Oct 2016 10:41:03 -0700 Subject: [PATCH 05/15] Added missing AVX intrinsics for fp16: in particular, implemented predux which is required by the matrix-vector code. 
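
As a rough illustration (assumed, not part of the patch): with predux and
friends defined for Packet8h, a half-precision matrix-vector product can be
dispatched through the AVX fp16 packet path when F16C is available, e.g.

    #include <Eigen/Dense>

    int main() {
      typedef Eigen::Matrix<Eigen::half, Eigen::Dynamic, Eigen::Dynamic> MatH;
      typedef Eigen::Matrix<Eigen::half, Eigen::Dynamic, 1> VecH;
      // Generate data in float and cast, to avoid relying on random<half>.
      MatH A = Eigen::MatrixXf::Random(16, 16).cast<Eigen::half>();
      VecH x = Eigen::VectorXf::Random(16).cast<Eigen::half>();
      // The matrix-vector kernel accumulates partial sums in packets and calls
      // predux() to collapse them to a scalar -- the overload added here.
      VecH y = A * x;
      return y.size() == 16 ? 0 : 1;
    }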
--- Eigen/src/Core/arch/CUDA/PacketMathHalf.h | 24 +++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/Eigen/src/Core/arch/CUDA/PacketMathHalf.h b/Eigen/src/Core/arch/CUDA/PacketMathHalf.h index 82dfc12c9..9dd89e07f 100644 --- a/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +++ b/Eigen/src/Core/arch/CUDA/PacketMathHalf.h @@ -492,6 +492,30 @@ template<> EIGEN_STRONG_INLINE void pscatter(Eigen::half* to[stride*7].x = aux[7].x; } +template<> EIGEN_STRONG_INLINE Eigen::half predux(const Packet8h& a) { + Packet8f af = half2float(a); + float reduced = predux(af); + return Eigen::half(reduced); +} + +template<> EIGEN_STRONG_INLINE Eigen::half predux_max(const Packet8h& a) { + Packet8f af = half2float(a); + float reduced = predux_max(af); + return Eigen::half(reduced); +} + +template<> EIGEN_STRONG_INLINE Eigen::half predux_min(const Packet8h& a) { + Packet8f af = half2float(a); + float reduced = predux_min(af); + return Eigen::half(reduced); +} + +template<> EIGEN_STRONG_INLINE Eigen::half predux_mul(const Packet8h& a) { + Packet8f af = half2float(a); + float reduced = predux_mul(af); + return Eigen::half(reduced); +} + EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { __m128i a = kernel.packet[0].x; From e2e9cdd16970914cf0a892fea5e7c4402b3ede41 Mon Sep 17 00:00:00 2001 From: RJ Ryan Date: Thu, 6 Oct 2016 10:49:48 -0700 Subject: [PATCH 06/15] Fully support complex types in SumReducer and MeanReducer when building for CUDA by using scalar_sum_op and scalar_product_op instead of operator+ and operator*. --- unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h index 7164e8d60..d73f6dc68 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h @@ -124,7 +124,8 @@ template struct SumReducer } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { - return saccum + predux(vaccum); + internal::scalar_sum_op sum_op; + return sum_op(saccum, predux(vaccum)); } }; @@ -173,7 +174,8 @@ template struct MeanReducer } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { - return (saccum + predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits::size); + internal::scalar_sum_op sum_op; + return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits::size); } protected: @@ -304,7 +306,8 @@ template struct ProdReducer static const bool IsStateful = false; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { - (*accum) *= t; + internal::scalar_product_op prod_op; + (*accum) = prod_op(*accum, t); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { @@ -328,7 +331,8 @@ template struct ProdReducer } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { - return saccum * predux_mul(vaccum); + internal::scalar_product_op prod_op; + return prod_op(saccum, predux_mul(vaccum)); } }; From bfc264abe86541632d17b146a8601a6999a0f8d6 Mon Sep 17 00:00:00 2001 From: RJ Ryan Date: Thu, 6 Oct 2016 11:10:14 -0700 Subject: [PATCH 07/15] Add a test that GPU complex product reductions match CPU reductions. 
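
Host-side sketch (assumed, not from the patch) of the property the new GPU
test verifies -- the same prod() reduction evaluated on the CPU, which the
device result is compared against:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <complex>
    #include <iostream>

    int main() {
      Eigen::Tensor<std::complex<float>, 2> in(8, 8);
      in.setRandom();
      // Full product reduction to a rank-0 tensor. The preceding patch routes
      // this through scalar_product_op so the same reducer also works when
      // compiled as CUDA device code.
      Eigen::Tensor<std::complex<float>, 0> full_redux = in.prod();
      std::cout << full_redux() << "\n";
      return 0;
    }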
--- unsupported/test/cxx11_tensor_complex_cuda.cu | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/unsupported/test/cxx11_tensor_complex_cuda.cu b/unsupported/test/cxx11_tensor_complex_cuda.cu index f895efd01..d4e111f5d 100644 --- a/unsupported/test/cxx11_tensor_complex_cuda.cu +++ b/unsupported/test/cxx11_tensor_complex_cuda.cu @@ -108,8 +108,46 @@ static void test_cuda_sum_reductions() { } +static void test_cuda_product_reductions() { + + Eigen::CudaStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + const int num_rows = internal::random(1024, 5*1024); + const int num_cols = internal::random(1024, 5*1024); + + Tensor, 2> in(num_rows, num_cols); + in.setRandom(); + + Tensor, 0> full_redux; + full_redux = in.prod(); + + std::size_t in_bytes = in.size() * sizeof(std::complex); + std::size_t out_bytes = full_redux.size() * sizeof(std::complex); + std::complex* gpu_in_ptr = static_cast*>(gpu_device.allocate(in_bytes)); + std::complex* gpu_out_ptr = static_cast*>(gpu_device.allocate(out_bytes)); + gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes); + + TensorMap, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols); + TensorMap, 0> > out_gpu(gpu_out_ptr); + + out_gpu.device(gpu_device) = in_gpu.prod(); + + Tensor, 0> full_redux_gpu; + gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes); + gpu_device.synchronize(); + + // Check that the CPU and GPU reductions return the same result. + VERIFY_IS_APPROX(full_redux(), full_redux_gpu()); + + gpu_device.deallocate(gpu_in_ptr); + gpu_device.deallocate(gpu_out_ptr); +} + + void test_cxx11_tensor_complex() { CALL_SUBTEST(test_cuda_nullary()); CALL_SUBTEST(test_cuda_sum_reductions()); + CALL_SUBTEST(test_cuda_product_reductions()); } From 4860727ac2f404bddd23ba60896bc1288f0de06c Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Fri, 7 Oct 2016 09:21:12 +0200 Subject: [PATCH 08/15] Remove static qualifier of free-functions (inline is enough and this helps ICC to find the right overload) --- Eigen/src/Geometry/Scaling.h | 12 ++++++------ test/geo_transformations.cpp | 0 2 files changed, 6 insertions(+), 6 deletions(-) mode change 100644 => 100755 Eigen/src/Geometry/Scaling.h mode change 100644 => 100755 test/geo_transformations.cpp diff --git a/Eigen/src/Geometry/Scaling.h b/Eigen/src/Geometry/Scaling.h old mode 100644 new mode 100755 index 3e12681b0..f58ca03d9 --- a/Eigen/src/Geometry/Scaling.h +++ b/Eigen/src/Geometry/Scaling.h @@ -118,28 +118,28 @@ operator*(const MatrixBase& matrix, const UniformScaling& s) { return matrix.derived() * s.factor(); } /** Constructs a uniform scaling from scale factor \a s */ -static inline UniformScaling Scaling(float s) { return UniformScaling(s); } +inline UniformScaling Scaling(float s) { return UniformScaling(s); } /** Constructs a uniform scaling from scale factor \a s */ -static inline UniformScaling Scaling(double s) { return UniformScaling(s); } +inline UniformScaling Scaling(double s) { return UniformScaling(s); } /** Constructs a uniform scaling from scale factor \a s */ template -static inline UniformScaling > Scaling(const std::complex& s) +inline UniformScaling > Scaling(const std::complex& s) { return UniformScaling >(s); } /** Constructs a 2D axis aligned scaling */ template -static inline DiagonalMatrix Scaling(const Scalar& sx, const Scalar& sy) +inline DiagonalMatrix Scaling(const Scalar& sx, const Scalar& sy) { return DiagonalMatrix(sx, sy); } /** Constructs a 3D axis aligned scaling */ template -static inline DiagonalMatrix 
Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz) +inline DiagonalMatrix Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz) { return DiagonalMatrix(sx, sy, sz); } /** Constructs an axis aligned scaling expression from vector expression \a coeffs * This is an alias for coeffs.asDiagonal() */ template -static inline const DiagonalWrapper Scaling(const MatrixBase& coeffs) +inline const DiagonalWrapper Scaling(const MatrixBase& coeffs) { return coeffs.asDiagonal(); } /** \deprecated */ diff --git a/test/geo_transformations.cpp b/test/geo_transformations.cpp old mode 100644 new mode 100755 From 5266ff8966c072defd5bfcfa74c1892fc500943b Mon Sep 17 00:00:00 2001 From: Benoit Steiner Date: Sat, 8 Oct 2016 19:12:44 +0000 Subject: [PATCH 09/15] Cleaned up a regression test --- unsupported/test/cxx11_tensor_argmax_cuda.cu | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/unsupported/test/cxx11_tensor_argmax_cuda.cu b/unsupported/test/cxx11_tensor_argmax_cuda.cu index 6fe8982f2..653443dc5 100644 --- a/unsupported/test/cxx11_tensor_argmax_cuda.cu +++ b/unsupported/test/cxx11_tensor_argmax_cuda.cu @@ -116,10 +116,10 @@ void test_cuda_argmax_dim() assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); - VERIFY_IS_EQUAL(tensor_arg.dimensions().TotalSize(), + VERIFY_IS_EQUAL(tensor_arg.size(), size_t(2*3*5*7 / tensor.dimension(dim))); - for (size_t n = 0; n < tensor_arg.dimensions().TotalSize(); ++n) { + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { // Expect max to be in the first index of the reduced dimension VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); } @@ -144,7 +144,7 @@ void test_cuda_argmax_dim() assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); - for (size_t n = 0; n < tensor_arg.dimensions().TotalSize(); ++n) { + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { // Expect max to be in the last index of the reduced dimension VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1); } @@ -205,10 +205,10 @@ void test_cuda_argmin_dim() assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); - VERIFY_IS_EQUAL(tensor_arg.dimensions().TotalSize(), - size_t(2*3*5*7 / tensor.dimension(dim))); + VERIFY_IS_EQUAL(tensor_arg.size(), + 2*3*5*7 / tensor.dimension(dim)); - for (size_t n = 0; n < tensor_arg.dimensions().TotalSize(); ++n) { + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { // Expect min to be in the first index of the reduced dimension VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); } @@ -233,7 +233,7 @@ void test_cuda_argmin_dim() assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); - for (size_t n = 0; n < tensor_arg.dimensions().TotalSize(); ++n) { + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { // Expect max to be in the last index of the reduced dimension VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1); } From 5727e4d89c0fbf44192e116038b2beea2af19768 Mon Sep 17 00:00:00 2001 From: Benoit Steiner Date: Sat, 8 Oct 2016 22:19:03 +0000 Subject: [PATCH 10/15] Reenabled the 
use of variadic templates on tegra x1 provides that the latest version (i.e. JetPack 2.3) is used. --- Eigen/src/Core/util/Macros.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index d65f92532..318ab9477 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -392,8 +392,8 @@ // Does the compiler support variadic templates? #ifndef EIGEN_HAS_VARIADIC_TEMPLATES #if EIGEN_MAX_CPP_VER>=11 && (__cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900) \ - && ( !defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 ) - // ^^ Disable the use of variadic templates when compiling with nvcc on ARM devices: + && ( !defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000) ) + // ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices: // this prevents nvcc from crashing when compiling Eigen on Tegra X1 #define EIGEN_HAS_VARIADIC_TEMPLATES 1 #else From 7f0599b6eb45c8a1a1aae9db32408d64eb7f5d45 Mon Sep 17 00:00:00 2001 From: Benoit Steiner Date: Sat, 8 Oct 2016 22:56:32 -0700 Subject: [PATCH 11/15] Manually define int16_t and uint16_t when compiling with Visual Studio --- unsupported/Eigen/CXX11/Tensor | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor index 4976a1254..73aae3da8 100644 --- a/unsupported/Eigen/CXX11/Tensor +++ b/unsupported/Eigen/CXX11/Tensor @@ -34,6 +34,8 @@ #include #ifdef _WIN32 +typedef __int16 int16_t; +typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; From 89e315152c759f298c144fd25f67b1a9a375ee08 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Wed, 12 Oct 2016 16:55:47 +0200 Subject: [PATCH 12/15] bug #1325: fix compilation on NEON with clang --- Eigen/src/Core/arch/NEON/Complex.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/Eigen/src/Core/arch/NEON/Complex.h b/Eigen/src/Core/arch/NEON/Complex.h index 3e121dce5..57e9b431f 100644 --- a/Eigen/src/Core/arch/NEON/Complex.h +++ b/Eigen/src/Core/arch/NEON/Complex.h @@ -16,8 +16,14 @@ namespace Eigen { namespace internal { inline uint32x4_t p4ui_CONJ_XOR() { +// See bug 1325, clang fails to call vld1q_u64. +#if EIGEN_COMP_CLANG + uint32x4_t ret = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; + return ret; +#else static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; return vld1q_u32( conj_XOR_DATA ); +#endif } inline uint32x2_t p2ui_CONJ_XOR() { @@ -282,8 +288,13 @@ ptranspose(PacketBlock& kernel) { //---------- double ---------- #if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG -const uint64_t p2ul_conj_XOR_DATA[] = { 0x0, 0x8000000000000000 }; -static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA ); +// See bug 1325, clang fails to call vld1q_u64. +#if EIGEN_COMP_CLANG + static uint64x2_t p2ul_CONJ_XOR = {0x0, 0x8000000000000000}; +#else + const uint64_t p2ul_conj_XOR_DATA[] = { 0x0, 0x8000000000000000 }; + static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA ); +#endif struct Packet1cd { From 47150af1c8e22174296d6b2077d719c5b786b1b0 Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Wed, 12 Oct 2016 08:34:39 -0700 Subject: [PATCH 13/15] Fix copy-paste error: Must use _mm256_cmp_ps for AVX. 
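
Background for the fix (not part of the patch): SSE exposes dedicated
comparison intrinsics such as _mm_cmpge_ps and _mm_cmplt_ps, but AVX has no
_mm256_cmpge_ps/_mm256_cmplt_ps -- 256-bit float comparisons go through a
single intrinsic plus a predicate immediate:

    #include <immintrin.h>

    // AVX spelling of the two comparisons used to build the denormal mask.
    static inline __m256 cmp_ge(__m256 a, __m256 b) {
      return _mm256_cmp_ps(a, b, _CMP_GE_OQ);  // ordered, non-signaling >=
    }
    static inline __m256 cmp_lt(__m256 a, __m256 b) {
      return _mm256_cmp_ps(a, b, _CMP_LT_OQ);  // ordered, non-signaling <
    }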
--- Eigen/src/Core/arch/AVX/MathFunctions.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Eigen/src/Core/arch/AVX/MathFunctions.h b/Eigen/src/Core/arch/AVX/MathFunctions.h index 25cd992ef..6af67ce2d 100644 --- a/Eigen/src/Core/arch/AVX/MathFunctions.h +++ b/Eigen/src/Core/arch/AVX/MathFunctions.h @@ -366,8 +366,9 @@ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f psqrt(const Packet8f& _x) { Packet8f half = pmul(_x, pset1(.5f)); Packet8f denormal_mask = _mm256_and_ps( - _mm256_cmpge_ps(_x, _mm256_setzero_ps()), - _mm256_cmplt_ps(_x, pset1((std::numeric_limits::min)()))); + _mm256_cmp_ps(_x, pset1((std::numeric_limits::min)()), + _CMP_LT_OQ), + _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_GE_OQ)); // Compute approximate reciprocal sqrt. Packet8f x = _mm256_rsqrt_ps(_x); From 091d373ee9eebff409e5e11a911acc0a4692b54a Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Wed, 12 Oct 2016 21:47:52 +0200 Subject: [PATCH 14/15] Fix outer-stride. --- unsupported/Eigen/AlignedVector3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3 index 135eec572..47a86d4c0 100644 --- a/unsupported/Eigen/AlignedVector3 +++ b/unsupported/Eigen/AlignedVector3 @@ -61,7 +61,7 @@ template class AlignedVector3 Scalar* data() { return m_coeffs.data(); } const Scalar* data() const { return m_coeffs.data(); } Index innerStride() const { return 1; } - Index outerStride() const { return m_coeffs.outerStride(); } + Index outerStride() const { return 3; } inline const Scalar& coeff(Index row, Index col) const { return m_coeffs.coeff(row, col); } From f939c351cbfcb1007943fe6062503bc455b692e1 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Wed, 12 Oct 2016 22:39:33 +0200 Subject: [PATCH 15/15] Fix SPQR for rectangular matrices --- Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 7 ++++--- test/spqr_support.cpp | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h index d9c3113e7..953d57c9d 100644 --- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h @@ -119,16 +119,16 @@ class SPQR : public SparseSolverBase > max2Norm = RealScalar(1); pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits::epsilon(); } - cholmod_sparse A; A = viewAsCholmod(mat); + m_rows = matrix.rows(); Index col = matrix.cols(); m_rank = SuiteSparseQR(m_ordering, pivotThreshold, col, &A, &m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc); if (!m_cR) { - m_info = NumericalIssue; + m_info = NumericalIssue; m_isInitialized = false; return; } @@ -139,7 +139,7 @@ class SPQR : public SparseSolverBase > /** * Get the number of rows of the input matrix and the Q matrix */ - inline Index rows() const {return m_cR->nrow; } + inline Index rows() const {return m_rows; } /** * Get the number of columns of the input matrix. 
@@ -245,6 +245,7 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
     mutable Index m_rank; // The rank of the matrix
     mutable cholmod_common m_cc; // Workspace and parameters
     bool m_useDefaultThreshold;     // Use default threshold
+    Index m_rows;
     template<typename ,typename > friend struct SPQR_QProduct;
 };
 
diff --git a/test/spqr_support.cpp b/test/spqr_support.cpp
index baa25a0c2..81e63b6a5 100644
--- a/test/spqr_support.cpp
+++ b/test/spqr_support.cpp
@@ -20,8 +20,8 @@ int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows
   int cols = internal::random<int>(1,rows);
   double density = (std::max)(8./(rows*cols), 0.01);
 
-  A.resize(rows,rows);
-  dA.resize(rows,rows);
+  A.resize(rows,cols);
+  dA.resize(rows,cols);
   initSparse<Scalar>(density, dA, A,ForceNonZeroDiag);
   A.makeCompressed();
   return rows;