From e2e9cdd16970914cf0a892fea5e7c4402b3ede41 Mon Sep 17 00:00:00 2001
From: RJ Ryan
Date: Thu, 6 Oct 2016 10:49:48 -0700
Subject: [PATCH 1/2] Fully support complex types in SumReducer and MeanReducer
 when building for CUDA by using scalar_sum_op and scalar_product_op instead
 of operator+ and operator*.

---
 unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
index 7164e8d60..d73f6dc68 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
@@ -124,7 +124,8 @@ template <typename T> struct SumReducer
   }
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
-    return saccum + predux(vaccum);
+    internal::scalar_sum_op<T> sum_op;
+    return sum_op(saccum, predux(vaccum));
   }
 };
 
@@ -173,7 +174,8 @@ template <typename T> struct MeanReducer
   }
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
-    return (saccum + predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
+    internal::scalar_sum_op<T> sum_op;
+    return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
   }
 
   protected:
@@ -304,7 +306,8 @@ template <typename T> struct ProdReducer
   static const bool IsStateful = false;
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
-    (*accum) *= t;
+    internal::scalar_product_op<T> prod_op;
+    (*accum) = prod_op(*accum, t);
   }
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
@@ -328,7 +331,8 @@ template <typename T> struct ProdReducer
   }
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
-    return saccum * predux_mul(vaccum);
+    internal::scalar_product_op<T> prod_op;
+    return prod_op(saccum, predux_mul(vaccum));
   }
 };

From bfc264abe86541632d17b146a8601a6999a0f8d6 Mon Sep 17 00:00:00 2001
From: RJ Ryan
Date: Thu, 6 Oct 2016 11:10:14 -0700
Subject: [PATCH 2/2] Add a test that GPU complex product reductions match CPU
 reductions.
---
 unsupported/test/cxx11_tensor_complex_cuda.cu | 38 +++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/unsupported/test/cxx11_tensor_complex_cuda.cu b/unsupported/test/cxx11_tensor_complex_cuda.cu
index f895efd01..d4e111f5d 100644
--- a/unsupported/test/cxx11_tensor_complex_cuda.cu
+++ b/unsupported/test/cxx11_tensor_complex_cuda.cu
@@ -108,8 +108,46 @@ static void test_cuda_sum_reductions() {
 }
 
+static void test_cuda_product_reductions() {
+
+  Eigen::CudaStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+
+  const int num_rows = internal::random<int>(1024, 5*1024);
+  const int num_cols = internal::random<int>(1024, 5*1024);
+
+  Tensor<std::complex<float>, 2> in(num_rows, num_cols);
+  in.setRandom();
+
+  Tensor<std::complex<float>, 0> full_redux;
+  full_redux = in.prod();
+
+  std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
+  std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
+  std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
+  std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
+  gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
+
+  TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
+  TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
+
+  out_gpu.device(gpu_device) = in_gpu.prod();
+
+  Tensor<std::complex<float>, 0> full_redux_gpu;
+  gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
+  gpu_device.synchronize();
+
+  // Check that the CPU and GPU reductions return the same result.
+  VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
+
+  gpu_device.deallocate(gpu_in_ptr);
+  gpu_device.deallocate(gpu_out_ptr);
+}
+
+
 void test_cxx11_tensor_complex()
 {
   CALL_SUBTEST(test_cuda_nullary());
   CALL_SUBTEST(test_cuda_sum_reductions());
+  CALL_SUBTEST(test_cuda_product_reductions());
 }
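
A note on why the substitution in the first patch helps, with a minimal sketch
(not part of either patch): the reducers now route arithmetic through Eigen's
functors, whose call operators are marked EIGEN_DEVICE_FUNC and can therefore
carry device-compatible specializations for complex scalars, whereas the raw
operator+/operator* on std::complex are host-only. The sketch below is a
host-side illustration only, assuming Eigen is on the include path;
finalize_sum is a made-up helper mirroring the reducers' finalizeBoth, not an
Eigen API.

    #include <Eigen/Core>
    #include <complex>

    template <typename T>
    T finalize_sum(T saccum, T packet_sum) {
      // Same pattern as SumReducer::finalizeBoth after the patch: call the
      // functor instead of writing saccum + packet_sum directly.
      Eigen::internal::scalar_sum_op<T> sum_op;
      return sum_op(saccum, packet_sum);
    }

    int main() {
      std::complex<float> a(1.f, 2.f), b(3.f, -1.f);
      // On the host the functor reduces to operator+; the point of the patch
      // is that the functor form is also usable from CUDA device code.
      return finalize_sum(a, b) == (a + b) ? 0 : 1;
    }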