Mirror of https://gitlab.com/libeigen/eigen.git, synced 2025-02-23 18:20:47 +08:00
Improved the performance of reductions on CUDA devices
This commit is contained in:
parent 515dee0baf
commit cfff40b1d4
@@ -337,6 +337,16 @@ struct FullReducer<Self, Op, ThreadPoolDevice, true> {
#endif


// Default inner reducer
template <typename Self, typename Op, typename Device>
struct InnerReducer {
  static const bool HasOptimizedImplementation = false;
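  // Device-specific specializations (such as InnerReducer<Self, Op, GpuDevice> further
  // down) set this flag to true and provide a run() that performs the reduction.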

  static EIGEN_DEVICE_FUNC void run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
    assert(false && "Not implemented");
  }
};

// Default outer reducer
template <typename Self, typename Op, typename Device>
struct OuterReducer {
@@ -352,6 +362,9 @@ struct OuterReducer {
template <int B, int N, typename S, typename R, typename I>
__global__ void FullReductionKernel(R, const S, I, typename S::CoeffReturnType*);

template <int NPT, typename S, typename R, typename I>
__global__ void InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);

template <int NPT, typename S, typename R, typename I>
__global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
#endif
@@ -516,6 +529,23 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
    // Attempt to use an optimized reduction.
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
    else if (RunningOnGPU && data && (m_device.majorDeviceVersion() >= 3)) {
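      // The optimized GPU path is only taken when the result is written directly into the
      // destination buffer (data != nullptr) and the device has compute capability >= 3.0.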
      bool reducing_inner_dims = true;
      for (int i = 0; i < NumReducedDims; ++i) {
        if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
          reducing_inner_dims &= m_reducedDims[i];
        } else {
          reducing_inner_dims &= m_reducedDims[NumInputDims - 1 - i];
        }
      }
      if (internal::InnerReducer<Self, Op, GpuDevice>::HasOptimizedImplementation &&
          (reducing_inner_dims || ReducingInnerMostDims)) {
        const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
        const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
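        // num_values_to_reduce inputs are folded into each of the num_coeffs_to_preserve outputs.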
        Op reducer(m_reducer);
        internal::InnerReducer<Self, Op, GpuDevice>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
        return false;
      }

      bool preserving_inner_dims = true;
      for (int i = 0; i < NumReducedDims; ++i) {
        if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
@@ -615,6 +645,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
#endif
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
  template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*);
  template <int NPT, typename S, typename R, typename I> friend void internal::InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
  template <int NPT, typename S, typename R, typename I> friend void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
#endif

@@ -131,7 +131,102 @@ struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
  }
};

#define DIVUP(x, y) (((x) + (y)-1) / (y))
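// Integer ceiling division: DIVUP(x, y) computes x / y rounded up for positive integers.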

extern __shared__ float temp[];

template <int NumPerThread, typename Self,
          typename Reducer, typename Index>
__global__ void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
                                     typename Self::CoeffReturnType* output) {
  eigen_assert(blockDim.y == 1);
  eigen_assert(blockDim.z == 1);
  eigen_assert(gridDim.y == 1);
  eigen_assert(gridDim.z == 1);

  const int unroll_times = 16;
  eigen_assert(NumPerThread % unroll_times == 0);
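  // Each thread folds NumPerThread input coefficients per column block, processed in
  // chunks of unroll_times so the inner loop below can be fully unrolled.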

  const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread);
  const Index num_input_blocks = input_col_blocks * num_preserved_coeffs;
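  // Every output row is covered by input_col_blocks column blocks, so thread blocks
  // iterate over (row, column block) pairs.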

  const Index num_threads = blockDim.x * gridDim.x;
  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
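  // Initialize every output coefficient to the reducer's identity before any partial
  // results are accumulated into it.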

  for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
    output[i] = reducer.initialize();
  }

  for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
    const Index row = i / input_col_blocks;

    if (row < num_preserved_coeffs) {
      const Index col_block = i % input_col_blocks;
      const Index col_begin = col_block * blockDim.x * NumPerThread + threadIdx.x;

      float reduced_val = reducer.initialize();

      for (Index j = 0; j < NumPerThread; j += unroll_times) {
        const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1);
        if (last_col >= num_coeffs_to_reduce) {
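          // This unrolled chunk would read past the end of the row: finish it with a
          // bounds-checked loop and stop.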
          for (Index col = col_begin + blockDim.x * j; col < num_coeffs_to_reduce; col += blockDim.x) {
            const float val = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
            reducer.reduce(val, &reduced_val);
          }
          break;
        } else {
          // Faster version of the loop with no branches after unrolling.
#pragma unroll
          for (int k = 0; k < unroll_times; ++k) {
            const Index col = col_begin + blockDim.x * (j + k);
            reducer.reduce(input.m_impl.coeff(row * num_coeffs_to_reduce + col), &reduced_val);
          }
        }
      }

      temp[threadIdx.x] = reduced_val;

      __syncthreads();
      const int warp_id = threadIdx.x & 31;
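      // Warp-synchronous tree reduction: threadIdx.x & 31 is the lane index within a warp,
      // and each warp folds its 32 partial values held in shared memory. Lane 0 then merges
      // the warp's result into the output with an atomic reduction.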
      if (warp_id < 16) reducer.reduce(temp[threadIdx.x + 16], &temp[threadIdx.x]);
      if (warp_id < 8) reducer.reduce(temp[threadIdx.x + 8], &temp[threadIdx.x]);
      if (warp_id < 4) reducer.reduce(temp[threadIdx.x + 4], &temp[threadIdx.x]);
      if (warp_id < 2) reducer.reduce(temp[threadIdx.x + 2], &temp[threadIdx.x]);
      if (warp_id < 1) {
        reducer.reduce(temp[threadIdx.x + 1], &temp[threadIdx.x]);
        atomicReduce(&(output[row]), temp[threadIdx.x], reducer);
      }
    }

    __syncthreads();
  }
}

template <typename Self, typename Op>
struct InnerReducer<Self, Op, GpuDevice> {
  // Unfortunately nvidia doesn't support well exotic types such as complex,
  // so reduce the scope of the optimized version of the code to the simple case
  // of floats.
  static const bool HasOptimizedImplementation = !Op::IsStateful &&
                                                 internal::is_same<typename Self::CoeffReturnType, float>::value;
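  // Stateful reducers are excluded as well: the functor is copied to the device, so any
  // state it carries (e.g. a mean reducer's running count) would not be combined across
  // CUDA threads.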

  template <typename Device, typename OutputType>
  static void run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
    assert(false && "Should only be called to reduce floats on a gpu device");
  }

  static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
    typedef typename Self::Index Index;

    const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
    const int block_size = 256;
    const int num_per_thread = 128;
    const int num_blocks = 32;
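    // Launch 32 blocks of 256 threads with block_size * sizeof(float) bytes of dynamic
    // shared memory backing temp[]; each thread folds up to 128 coefficients per column block.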

    LAUNCH_CUDA_KERNEL((InnerReductionKernel<num_per_thread>),
                       num_blocks, block_size, block_size*sizeof(float), device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
  }
};


template <int NumPerThread, typename Self,
          typename Reducer, typename Index>
@@ -145,7 +240,7 @@ __global__ void OuterReductionKernel(Reducer reducer, const Self input, Index nu
  }

  // Do the reduction.
-  const Index max_iter = DIVUP(num_coeffs_to_reduce, NumPerThread) * num_preserved_coeffs;
+  const Index max_iter = divup<Index>(num_coeffs_to_reduce, NumPerThread) * num_preserved_coeffs;
  for (Index i = thread_id; i < max_iter; i += num_threads) {
    const Index input_col = i % num_preserved_coeffs;
    const Index input_row = (i / num_preserved_coeffs) * NumPerThread;
@@ -189,8 +284,6 @@ struct OuterReducer<Self, Op, GpuDevice> {
  }
};

#undef DIVUP

#endif
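For context, a minimal sketch (not part of this patch) of the kind of expression the new InnerReducer<Self, Op, GpuDevice> path accelerates: summing the innermost dimension of a column-major 2-D tensor on a CUDA device. The function and buffer names (reduce_innermost_dim, gpu_in, gpu_out) are hypothetical; it assumes the unsupported/CXX11 Tensor module, compiled with nvcc and EIGEN_USE_GPU so the GPU specializations above are active.

#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>

void reduce_innermost_dim(float* gpu_in, float* gpu_out, int rows, int cols,
                          const Eigen::GpuDevice& device) {
  // Map the device buffers as tensors. The default layout is ColMajor, so
  // dimension 0 is the innermost (contiguous) one.
  Eigen::TensorMap<Eigen::Tensor<float, 2> > in(gpu_in, rows, cols);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > out(gpu_out, cols);

  // Reduce dimension 0: each of the `cols` outputs folds `rows` contiguous
  // inputs, which is the case dispatched to InnerReducer above.
  Eigen::array<int, 1> reduce_dim;
  reduce_dim[0] = 0;
  out.device(device) = in.sum(reduce_dim);
}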