Optimized outer reduction on GPUs.

Benoit Steiner 2015-12-22 15:06:17 -08:00
parent 3504ae47ca
commit b5d2078c4a
2 changed files with 92 additions and 1 deletion
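For context, the "outer reduction" targeted by this commit reduces the outermost dimensions of a tensor (in storage order) while preserving the inner ones, so each output coefficient accumulates a strided slice of the input. A minimal usage sketch with the Eigen Tensor module follows; the shapes and variable names are illustrative only, not taken from the commit.

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // Column-major by default: dimension 0 is the inner (contiguous) one.
  Eigen::Tensor<float, 2> input(128, 1024);
  input.setRandom();
  Eigen::array<int, 1> reduce_dims{{1}};                    // reduce only the outer dimension
  Eigen::Tensor<float, 1> result = input.sum(reduce_dims);  // 128 preserved coefficients
  return 0;
}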


@@ -337,9 +337,23 @@ struct FullReducer<Self, Op, ThreadPoolDevice, true> {
#endif
// Default outer reducer
template <typename Self, typename Op, typename Device>
struct OuterReducer {
  static const bool HasOptimizedImplementation = false;

  static EIGEN_DEVICE_FUNC void run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
    assert(false && "Not implemented");
  }
};
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
// Forward declare the GPU reduction kernels so the evaluator below can befriend them.
template <int B, int N, typename S, typename R, typename I>
__global__ void FullReductionKernel(R, const S, I, typename S::CoeffReturnType*);

template <int NPT, typename S, typename R, typename I>
__global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
#endif
} // end namespace internal
@@ -439,7 +453,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
}
}
}
// Precompute input strides.
if (NumInputDims > 0) {
array<Index, NumInputDims> input_strides;
@@ -498,6 +512,28 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
      internal::FullReducer<Self, Op, Device>::run(*this, reducer, m_device, data);
      return need_assign;
    }
    // Attempt to use an optimized reduction: on a GPU, when the reduced dimensions
    // are the outermost ones (in storage order), dispatch to the outer reducer.
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
    else if (RunningOnGPU && data && (m_device.majorDeviceVersion() >= 3)) {
      bool preserving_inner_dims = true;
      for (int i = 0; i < NumReducedDims; ++i) {
        if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
          preserving_inner_dims &= m_reducedDims[NumInputDims - 1 - i];
        } else {
          preserving_inner_dims &= m_reducedDims[i];
        }
      }
      if (internal::OuterReducer<Self, Op, GpuDevice>::HasOptimizedImplementation &&
          preserving_inner_dims) {
        const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
        const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
        Op reducer(m_reducer);
        internal::OuterReducer<Self, Op, GpuDevice>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
        return false;
      }
    }
#endif
    return true;
  }
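As a standalone illustration of what the preserving_inner_dims test above establishes for the column-major branch, the sketch below uses hypothetical names and sizes: the reduced dimensions must be the trailing (outermost) ones for the optimized path to be taken.

#include <array>
#include <iostream>

int main() {
  constexpr int NumInputDims = 3;
  constexpr int NumReducedDims = 2;
  // true iff the corresponding dimension is reduced; here dimensions 1 and 2.
  std::array<bool, NumInputDims> reduced = {{false, true, true}};
  bool preserving_inner_dims = true;
  for (int i = 0; i < NumReducedDims; ++i) {
    // ColMajor branch of the check: only the trailing dimensions may be reduced.
    preserving_inner_dims &= reduced[NumInputDims - 1 - i];
  }
  std::cout << std::boolalpha << preserving_inner_dims << "\n";  // prints "true"
  return 0;
}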
@@ -579,6 +615,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
#endif
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
  template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*);
  template <int NPT, typename S, typename R, typename I> friend void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
#endif
// Returns the Index in the input tensor of the first value that needs to be


@@ -131,6 +131,60 @@ struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
}
};
template <int NumPerThread, typename Self,
          typename Reducer, typename Index>
__global__ void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
                                     typename Self::CoeffReturnType* output) {
  const Index num_threads = blockDim.x * gridDim.x;
  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;

  // Initialize the output values.
  for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
    output[i] = reducer.initialize();
  }

  // Do the reduction: each iteration of the grid-stride loop accumulates up to
  // NumPerThread consecutive values of one preserved column into a private
  // partial result, then merges that partial result into the output.
  const Index max_iter = DIVUP(num_coeffs_to_reduce, NumPerThread) * num_preserved_coeffs;
  for (Index i = thread_id; i < max_iter; i += num_threads) {
    const Index input_col = i % num_preserved_coeffs;
    const Index input_row = (i / num_preserved_coeffs) * NumPerThread;
    typename Self::CoeffReturnType reduced_val = reducer.initialize();
    const Index max_row = numext::mini(input_row + NumPerThread, num_coeffs_to_reduce);
    for (Index j = input_row; j < max_row; j++) {
      typename Self::CoeffReturnType val = input.m_impl.coeff(j * num_preserved_coeffs + input_col);
      reducer.reduce(val, &reduced_val);
    }
    // Several threads may contribute to the same column, hence the atomic merge.
    atomicReduce(&(output[input_col]), reduced_val, reducer);
  }
}
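For reference, a serial sketch of the addressing scheme the kernel relies on, written for a plain sum reduction (this helper is hypothetical, not part of the commit): coefficient (row j, preserved column c) lives at j * num_preserved_coeffs + c, and every row of a column folds into output[c].

#include <vector>

void outer_sum_reference(const std::vector<float>& input,
                         int num_coeffs_to_reduce,       // rows, i.e. values folded per output
                         int num_preserved_coeffs,       // columns, i.e. number of outputs
                         std::vector<float>& output) {
  output.assign(num_preserved_coeffs, 0.0f);              // plays the role of reducer.initialize()
  for (int j = 0; j < num_coeffs_to_reduce; ++j) {
    for (int c = 0; c < num_preserved_coeffs; ++c) {
      // Same addressing as input.m_impl.coeff(j * num_preserved_coeffs + input_col).
      output[c] += input[j * num_preserved_coeffs + c];   // reducer.reduce(val, &reduced_val)
    }
  }
}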
template <typename Self, typename Op>
struct OuterReducer<Self, Op, GpuDevice> {
  // Unfortunately nvidia doesn't handle exotic types such as complex numbers
  // well, so restrict the scope of the optimized version of the code to the
  // simple case of floats.
  static const bool HasOptimizedImplementation = !Op::IsStateful &&
                                                 internal::is_same<typename Self::CoeffReturnType, float>::value;

  // Catch-all overload: reaching it means the caller ignored HasOptimizedImplementation.
  template <typename Device, typename OutputType>
  static void run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
    assert(false && "Should only be called to reduce floats on a gpu device");
  }

  static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
    typedef typename Self::Index Index;

    // Assign roughly num_per_thread values to each thread and launch enough blocks
    // to cover the whole input.
    const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
    const int block_size = 256;
    const int num_per_thread = 16;
    const int num_blocks = std::ceil(static_cast<float>(num_coeffs) / (block_size * num_per_thread));
    LAUNCH_CUDA_KERNEL((OuterReductionKernel<num_per_thread>),
                       num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
  }
};
#endif
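As a rough sanity check of the launch configuration computed in run() above, here is the same arithmetic for a hypothetical problem size (the sizes below are illustrative only):

#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical problem: reduce 1024 values for each of 4096 preserved outputs.
  const long long num_coeffs_to_reduce = 1024;
  const long long num_preserved_vals = 4096;
  const long long num_coeffs = num_coeffs_to_reduce * num_preserved_vals;  // 4,194,304
  const int block_size = 256;     // as hard-coded in run()
  const int num_per_thread = 16;  // as hard-coded in run()
  const int num_blocks = static_cast<int>(
      std::ceil(static_cast<float>(num_coeffs) / (block_size * num_per_thread)));
  std::printf("launch %d blocks of %d threads\n", num_blocks, block_size);  // 1024 blocks
  return 0;
}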