Merged in rryan/eigen/tensorfunctors (pull request PR-233)

Fully support complex types in SumReducer, MeanReducer, and ProdReducer when building for CUDA by using scalar_sum_op and scalar_product_op instead of operator+ and operator*.
Benoit Steiner 2016-10-06 12:29:19 -07:00
commit 33fba3f08d
2 changed files with 46 additions and 4 deletions
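
The core of the change is a pattern rather than new arithmetic: instead of writing `a + b` or `a * b` inline, the reducers call an internal functor object, so there is a single customization point per operation if a scalar type (such as std::complex<float>) ever needs special handling in device code. The following is only a minimal host-side sketch of that pattern; the `sketch` namespace, the simplified reducer, and `sum_three` are illustrative stand-ins and not code from this patch, and the real Eigen functors are additionally EIGEN_DEVICE_FUNC qualified and provide packet overloads.

// Minimal sketch of the functor-based accumulation pattern (assumptions noted above).
#include <complex>

namespace sketch {

// Stand-in for Eigen::internal::scalar_sum_op<T>.
template <typename T>
struct scalar_sum_op {
  T operator()(const T& a, const T& b) const { return a + b; }
};

// Simplified reducer: every accumulation goes through the functor instead of
// a bare operator+, so specializing the functor once covers all call sites.
template <typename T>
struct SumReducerSketch {
  void reduce(const T t, T* accum) const {
    scalar_sum_op<T> sum_op;
    *accum = sum_op(*accum, t);
  }
};

}  // namespace sketch

// Usage: accumulate a few complex values through the reducer.
inline std::complex<float> sum_three(std::complex<float> a,
                                     std::complex<float> b,
                                     std::complex<float> c) {
  sketch::SumReducerSketch<std::complex<float> > reducer;
  std::complex<float> accum(0.f, 0.f);
  reducer.reduce(a, &accum);
  reducer.reduce(b, &accum);
  reducer.reduce(c, &accum);
  return accum;
}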

@@ -124,7 +124,8 @@ template <typename T> struct SumReducer
   }
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
-    return saccum + predux(vaccum);
+    internal::scalar_sum_op<T> sum_op;
+    return sum_op(saccum, predux(vaccum));
   }
 };
 
@@ -173,7 +174,8 @@ template <typename T> struct MeanReducer
   }
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
-    return (saccum + predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
+    internal::scalar_sum_op<T> sum_op;
+    return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
   }
 
   protected:
@@ -304,7 +306,8 @@ template <typename T> struct ProdReducer
   static const bool IsStateful = false;
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
-    (*accum) *= t;
+    internal::scalar_product_op<T> prod_op;
+    (*accum) = prod_op(*accum, t);
   }
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
@@ -328,7 +331,8 @@ template <typename T> struct ProdReducer
   }
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
-    return saccum * predux_mul(vaccum);
+    internal::scalar_product_op<T> prod_op;
+    return prod_op(saccum, predux_mul(vaccum));
   }
 };
 
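
These reducers back the Tensor reduction API, so the user-visible effect is that sum(), mean(), and prod() on complex-valued tensors also work in CUDA builds. The snippet below is an illustration of that host-side API only; it is not part of the patch (the second changed file, shown next, adds the corresponding GPU test for prod()).

// Illustrative host-side usage (not from this patch): the same reducers
// evaluate these calls on the CPU and, with this change, for complex
// scalars in CUDA builds as well.
#include <unsupported/Eigen/CXX11/Tensor>
#include <complex>

void complex_reductions_example() {
  Eigen::Tensor<std::complex<float>, 2> t(64, 64);
  t.setRandom();

  Eigen::Tensor<std::complex<float>, 0> s, m, p;
  s = t.sum();   // SumReducer
  m = t.mean();  // MeanReducer
  p = t.prod();  // ProdReducer
}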

@@ -108,8 +108,46 @@ static void test_cuda_sum_reductions() {
 }
 
+static void test_cuda_product_reductions() {
+  Eigen::CudaStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+
+  const int num_rows = internal::random<int>(1024, 5*1024);
+  const int num_cols = internal::random<int>(1024, 5*1024);
+
+  Tensor<std::complex<float>, 2> in(num_rows, num_cols);
+  in.setRandom();
+
+  Tensor<std::complex<float>, 0> full_redux;
+  full_redux = in.prod();
+
+  std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
+  std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
+  std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
+  std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
+  gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
+
+  TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
+  TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
+
+  out_gpu.device(gpu_device) = in_gpu.prod();
+
+  Tensor<std::complex<float>, 0> full_redux_gpu;
+  gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
+  gpu_device.synchronize();
+
+  // Check that the CPU and GPU reductions return the same result.
+  VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
+
+  gpu_device.deallocate(gpu_in_ptr);
+  gpu_device.deallocate(gpu_out_ptr);
+}
+
 
 void test_cxx11_tensor_complex()
 {
   CALL_SUBTEST(test_cuda_nullary());
   CALL_SUBTEST(test_cuda_sum_reductions());
+  CALL_SUBTEST(test_cuda_product_reductions());
 }