Simplified the code that launches cuda kernels.

Benoit Steiner 2016-04-19 14:55:21 -07:00
parent b9ea40c30d
commit 7129d998db
3 changed files with 7 additions and 16 deletions

unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h

@@ -291,18 +291,9 @@ struct GpuDevice {
   int max_blocks_;
 };

-#if !defined(__CUDA_ARCH__)
 #define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...)             \
   (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__);   \
   assert(cudaGetLastError() == cudaSuccess);
-#elif __CUDA_ARCH__ >= 350
-#define LAUNCH_CUDA_KERNEL(kernel, ...) \
-  { const auto __attribute__((__unused__)) __makeTheKernelInstantiate = &(kernel); } \
-  eigen_assert(false && "Cannot launch a kernel from another kernel" __CUDA_ARCH__ kernel);
-#else
-#define LAUNCH_CUDA_KERNEL(kernel, ...) \
-  eigen_assert(false && "Cannot launch a kernel from another kernel" __CUDA_ARCH__ kernel);
-#endif

 // FIXME: Should be device and kernel specific.
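The two deleted device-side branches never launched anything: they only forced the kernel symbol to be instantiated (by taking its address) and then asserted, so the macro is now defined once, for host code only. For context, a host-side call looks roughly like the sketch below; the launch-configuration arithmetic and variable names are assumptions modeled on how the executor uses the macro, not verbatim code.

    // Minimal sketch of a host-side launch (sizing logic is an assumption):
    // choose a block size, cover `size` elements with at most `max_blocks`
    // blocks, then launch on the device's stream.
    const int block_size = device.maxCudaThreadsPerBlock();
    const int max_blocks = device.getNumCudaMultiProcessors() *
                           device.maxCudaThreadsPerMultiProcessor() / block_size;
    const int num_blocks = numext::mini<int>(max_blocks, divup<int>(size, block_size));
    LAUNCH_CUDA_KERNEL((EigenMetaKernel<TensorEvaluator<Expression, GpuDevice>, Index>),
                       num_blocks, block_size, 0, device, evaluator, size);

Note that the assert after the launch only catches launch-time failures reported by cudaGetLastError; it says nothing about whether the kernel itself ran to completion.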

unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h

@@ -193,7 +193,7 @@ struct EigenMetaKernelEval {
 template <typename Evaluator, typename Index>
 struct EigenMetaKernelEval<Evaluator, Index, true> {
   static __device__ EIGEN_ALWAYS_INLINE
-  void run(Evaluator eval, Index first, Index last, Index step_size) {
+  void run(Evaluator& eval, Index first, Index last, Index step_size) {
     const Index PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
     const Index vectorized_size = (last / PacketSize) * PacketSize;
     const Index vectorized_step_size = step_size * PacketSize;
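Passing the evaluator by reference lets every call share the kernel's single copy instead of copying the whole evaluator per invocation. The constants above feed a split loop, full packets first and then a scalar tail; a sketch of the assumed continuation of run() (loop shape inferred from the names, not quoted from the file):

    // Assumed continuation: process whole packets with a strided loop,
    // then finish the remaining coefficients one at a time.
    for (Index i = first * PacketSize; i < vectorized_size; i += vectorized_step_size) {
      eval.evalPacket(i);
    }
    for (Index i = vectorized_size + first; i < last; i += step_size) {
      eval.evalScalar(i);
    }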

unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h

@@ -126,11 +126,11 @@ struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
       internal::is_same<typename Self::CoeffReturnType, float>::value;

   template <typename OutputType>
-  static EIGEN_DEVICE_FUNC void run(const Self&, Op&, const GpuDevice&, OutputType*) {
+  static void run(const Self&, Op&, const GpuDevice&, OutputType*) {
     assert(false && "Should only be called on floats");
   }

-  static EIGEN_DEVICE_FUNC void run(const Self& self, Op& reducer, const GpuDevice& device, float* output) {
+  static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output) {
     typedef typename Self::Index Index;

     const Index num_coeffs = array_prod(self.m_impl.dimensions());
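Dropping EIGEN_DEVICE_FUNC makes run() host-only, which is forced by the macro change in TensorDeviceCuda.h above: LAUNCH_CUDA_KERNEL no longer has a device-side definition, so its callers cannot be compiled as device code. A sketch of how the float overload plausibly continues; the constants and the kernel's template signature are assumptions:

    // Assumed continuation: size the launch to the input and hand off to
    // the now host-only LAUNCH_CUDA_KERNEL macro.
    const int block_size = 256;      // assumption
    const int num_per_thread = 128;  // assumption
    const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
    LAUNCH_CUDA_KERNEL((FullReductionKernel<block_size, num_per_thread, Self, Op, Index>),
                       num_blocks, block_size, 0, device, reducer, self, num_coeffs, output);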
@@ -226,12 +226,12 @@ struct InnerReducer<Self, Op, GpuDevice> {
       internal::is_same<typename Self::CoeffReturnType, float>::value;

   template <typename Device, typename OutputType>
-  static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
+  static bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
     assert(false && "Should only be called to reduce floats on a gpu device");
     return true;
   }

-  static EIGEN_DEVICE_FUNC bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+  static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
     typedef typename Self::Index Index;

     // It's faster to use the usual code.
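The comment hints at a size cutoff: for small inner reductions, the generic evaluator beats the cost of a kernel launch. A speculative sketch of the continuation, where the threshold name, the constants, and the return-value convention (true meaning "fall back to the default code path") are all assumptions:

    // Speculative continuation; kSmallReductionThreshold is a hypothetical name.
    if (num_coeffs_to_reduce <= kSmallReductionThreshold) {
      return true;  // let the generic evaluator handle the tiny reduction
    }
    const int block_size = 256;      // assumption
    const int num_per_thread = 128;  // assumption
    const int num_blocks =
        divup<int>(num_coeffs_to_reduce * num_preserved_vals, block_size * num_per_thread);
    LAUNCH_CUDA_KERNEL((InnerReductionKernel<num_per_thread, Self, Op, Index>),
                       num_blocks, block_size, 0, device,
                       reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
    return false;  // handled on the GPU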
@@ -305,12 +305,12 @@ struct OuterReducer<Self, Op, GpuDevice> {
       internal::is_same<typename Self::CoeffReturnType, float>::value;

   template <typename Device, typename OutputType>
-  static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
+  static bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
     assert(false && "Should only be called to reduce floats on a gpu device");
     return true;
   }

-  static EIGEN_DEVICE_FUNC bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+  static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
     typedef typename Self::Index Index;

     // It's faster to use the usual code.
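OuterReducer mirrors InnerReducer line for line, so the same EIGEN_DEVICE_FUNC removal applies. To show how user code reaches these now host-only entry points, here is a hypothetical end-to-end reduction on a GpuDevice; buffer names and sizes are illustrative and error handling is omitted:

    #include <unsupported/Eigen/CXX11/Tensor>

    // gpu_in and gpu_out point to device memory allocated with cudaMalloc.
    void reduce_on_gpu(float* gpu_in, float* gpu_out) {
      Eigen::CudaStreamDevice stream;
      Eigen::GpuDevice device(&stream);
      Eigen::TensorMap<Eigen::Tensor<float, 2> > in(gpu_in, 128, 1024);
      Eigen::TensorMap<Eigen::Tensor<float, 1> > out(gpu_out, 1024);
      Eigen::array<int, 1> dims{{0}};  // innermost dimension in column-major order
      // Assigning on the device dispatches to InnerReducer<..., GpuDevice>::run.
      out.device(device) = in.sum(dims);
    }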