Mirror of https://gitlab.com/libeigen/eigen.git
Fixed a bug impacting some outer reductions on GPU
parent 5f50f12d2c
commit 028e299577
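The bug: evalSubExprsIfNeeded() folded the null-buffer check and the size heuristic into a single condition. When no destination buffer was supplied and the heuristic declined to allocate a scratch buffer, execution fell through and launched the optimized reduction kernel with a null output pointer. The fix below splits the condition: with no buffer available, the evaluator now returns true so the caller falls back to coefficient-by-coefficient evaluation. For context, a simplified sketch of the caller-side contract this relies on (modelled on Eigen's TensorExecutor; an illustration, not the verbatim implementation):

    TensorEvaluator<Expression, Device> evaluator(expr, device);
    // Returns true when the evaluator did NOT produce the result itself
    // and the caller must assign coefficients one by one.
    const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
    if (needs_assign) {
      const Index size = internal::array_prod(evaluator.dimensions());
      for (Index i = 0; i < size; ++i) {
        evaluator.evalScalar(i);  // slow but always-correct fallback path
      }
    }
    evaluator.cleanup();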
@@ -505,9 +505,14 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
          (reducing_inner_dims || ReducingInnerMostDims)) {
        const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
        const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
-       if (!data && num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 128) {
-         data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
-         m_result = data;
+       if (!data) {
+         if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 128) {
+           data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+           m_result = data;
+         }
+         else {
+           return true;
+         }
        }
        Op reducer(m_reducer);
        if (internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
@@ -533,9 +538,14 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
          preserving_inner_dims) {
        const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
        const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
-       if (!data && num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 32) {
-         data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
-         m_result = data;
+       if (!data) {
+         if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 32) {
+           data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+           m_result = data;
+         }
+         else {
+           return true;
+         }
        }
        Op reducer(m_reducer);
        if (internal::OuterReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
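The same fix is applied to the outer-reduction path above. The heuristic is identical except for the lower bound on the number of values to reduce (32 here versus 128 for the inner-reduction path), and, as before, declining to allocate now returns true instead of falling through to OuterReducer::run with a null output pointer.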
@@ -556,6 +566,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     m_impl.cleanup();
     if (m_result) {
       m_device.deallocate(m_result);
+      m_result = NULL;
     }
   }
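cleanup() now also resets m_result to NULL after deallocating it, so the if (m_result) guard stays meaningful and the same buffer cannot be deallocated twice should cleanup() ever run more than once.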
@@ -56,9 +56,102 @@ static void test_full_reductions() {
   gpu_device.deallocate(gpu_out_ptr);
 }
 
+template<typename Type, int DataLayout>
+static void test_first_dim_reductions() {
+  int dim_x = 33;
+  int dim_y = 1;
+  int dim_z = 128;
+
+  Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
+  in.setRandom();
+
+  Eigen::array<int, 1> red_axis;
+  red_axis[0] = 0;
+  Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
+
+  // Create device
+  Eigen::CudaStreamDevice stream;
+  Eigen::GpuDevice dev(&stream);
+
+  // Create data
+  Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
+  Type* out_data = (Type*)dev.allocate(dim_z*dim_y*sizeof(Type));
+  Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
+  Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_y, dim_z);
+
+  // Perform operation
+  dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
+  gpu_out.device(dev) = gpu_in.sum(red_axis);
+  gpu_out.device(dev) += gpu_in.sum(red_axis);
+  Tensor<Type, 2, DataLayout> redux_gpu(dim_y, dim_z);
+  dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
+  dev.synchronize();
+
+  // Check that the CPU and GPU reductions return the same result.
+  for (int i = 0; i < gpu_out.size(); ++i) {
+    VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
+  }
+
+  dev.deallocate(in_data);
+  dev.deallocate(out_data);
+}
+
+template<typename Type, int DataLayout>
+static void test_last_dim_reductions() {
+  int dim_x = 128;
+  int dim_y = 1;
+  int dim_z = 33;
+
+  Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
+  in.setRandom();
+
+  Eigen::array<int, 1> red_axis;
+  red_axis[0] = 2;
+  Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
+
+  // Create device
+  Eigen::CudaStreamDevice stream;
+  Eigen::GpuDevice dev(&stream);
+
+  // Create data
+  Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
+  Type* out_data = (Type*)dev.allocate(dim_x*dim_y*sizeof(Type));
+  Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
+  Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_x, dim_y);
+
+  // Perform operation
+  dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
+  gpu_out.device(dev) = gpu_in.sum(red_axis);
+  gpu_out.device(dev) += gpu_in.sum(red_axis);
+  Tensor<Type, 2, DataLayout> redux_gpu(dim_x, dim_y);
+  dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
+  dev.synchronize();
+
+  // Check that the CPU and GPU reductions return the same result.
+  for (int i = 0; i < gpu_out.size(); ++i) {
+    VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
+  }
+
+  dev.deallocate(in_data);
+  dev.deallocate(out_data);
+}
+
 
 void test_cxx11_tensor_reduction_cuda() {
+  CALL_SUBTEST_1((test_full_reductions<float, ColMajor>()));
+  CALL_SUBTEST_1((test_full_reductions<double, ColMajor>()));
+  CALL_SUBTEST_2((test_full_reductions<float, RowMajor>()));
+  CALL_SUBTEST_2((test_full_reductions<double, RowMajor>()));
+
+  CALL_SUBTEST_3((test_first_dim_reductions<float, ColMajor>()));
+  CALL_SUBTEST_3((test_first_dim_reductions<double, ColMajor>()));
+  CALL_SUBTEST_4((test_first_dim_reductions<float, RowMajor>()));
+  // Outer reductions of doubles aren't supported just yet.
+  // CALL_SUBTEST_4((test_first_dim_reductions<double, RowMajor>()))
+
+  CALL_SUBTEST_5((test_last_dim_reductions<float, ColMajor>()));
+  // Outer reductions of doubles aren't supported just yet.
+  // CALL_SUBTEST_5((test_last_dim_reductions<double, ColMajor>()));
+  CALL_SUBTEST_6((test_last_dim_reductions<float, RowMajor>()));
+  CALL_SUBTEST_6((test_last_dim_reductions<double, RowMajor>()));
 }
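The new tests cover reductions over the first and last dimensions for both layouts, which between them exercise the InnerReducer and OuterReducer code paths. Each test assigns the reduction with = and then accumulates it with +=, so the check against 2*redux(i) verifies both paths; the += case is presumably where the fixed fall-through mattered, since inside a compound expression the reduction sub-expression is evaluated without a destination buffer. The double-precision outer-reduction subtests remain commented out because, as the comments note, outer reductions of doubles aren't supported yet.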