Merged in jeremy_barnes/eigen/shader-model-3.0 (pull request PR-152)

Alternative way of forcing instantiation of device kernels without causing warnings or requiring device-to-device kernel invocations.
Benoit Steiner 2016-01-11 11:43:37 -08:00
commit 2c3b13eded
3 changed files with 9 additions and 3 deletions

@@ -238,9 +238,15 @@ struct GpuDevice {
};
#ifndef __CUDA_ARCH__
#define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
(kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \
assert(cudaGetLastError() == cudaSuccess);
#else
#define LAUNCH_CUDA_KERNEL(kernel, ...) \
{ static const auto __attribute__((__unused__)) __makeTheKernelInstantiate = &(kernel); } \
eigen_assert(false && "Cannot launch a kernel from another kernel" __CUDA_ARCH__);
#endif
// FIXME: Should be device and kernel specific.
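For context, the new device-side branch of LAUNCH_CUDA_KERNEL never launches anything: it only takes the kernel's address in an unused static, which is enough to make nvcc instantiate and emit the templated kernel, without the dynamic-parallelism support that an actual <<<...>>> launch from device code would require, and without an "unused" warning. A minimal sketch of the idiom outside Eigen, mirroring the structure of the macro above but with hypothetical FillKernel/fill names that are not part of this commit:

    #include <cassert>
    #include <cuda_runtime.h>

    // Hypothetical templated kernel; device code is only emitted for
    // instantiations that are actually referenced somewhere.
    template <typename T>
    __global__ void FillKernel(T* out, T value, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) out[i] = value;
    }

    // Compiled for both host and device, like Eigen's EIGEN_DEVICE_FUNC code.
    template <typename T>
    __host__ __device__ void fill(T* out, T value, int n, cudaStream_t stream) {
    #ifndef __CUDA_ARCH__
      // Host pass: launch the kernel on the device's stream and check for errors.
      FillKernel<T><<<(n + 255) / 256, 256, 0, stream>>>(out, value, n);
      assert(cudaGetLastError() == cudaSuccess);
    #else
      // Device pass: launching here would need a device-to-device kernel
      // invocation.  Taking the kernel's address instead forces FillKernel<T>
      // to be instantiated while generating no launch and no warning.
      static const auto __attribute__((__unused__)) force_instantiation = &FillKernel<T>;
      assert(false && "cannot launch a kernel from device code here");
    #endif
    }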

@@ -505,7 +505,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;
- EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(CoeffReturnType* data) {
m_impl.evalSubExprsIfNeeded(NULL);
// Use the FullReducer if possible.
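The only change in this hunk is adding EIGEN_DEVICE_FUNC, which under nvcc marks the function __host__ __device__ so it can be called from kernels as well as from host code. A simplified sketch of what that annotation buys, using a stand-in DEVICE_FUNC macro and a toy Evaluator (hypothetical names, not Eigen's actual definitions):

    #include <cuda_runtime.h>

    // Stand-in for EIGEN_DEVICE_FUNC: expands to __host__ __device__ when
    // compiling with nvcc, and to nothing for a plain host compiler.
    #ifdef __CUDACC__
    #define DEVICE_FUNC __host__ __device__
    #else
    #define DEVICE_FUNC
    #endif

    struct Evaluator {
      // Annotated: callable both from host code and from inside kernels.
      DEVICE_FUNC bool evalSubExprsIfNeeded(float*) { return true; }

      // Unannotated: host-only.  Calling it from the kernel below would produce
      // "error: calling a __host__ function from a __global__ function".
      bool hostOnly() { return true; }
    };

    __global__ void evalOnDevice(Evaluator ev, bool* ok) {
      *ok = ev.evalSubExprsIfNeeded(nullptr);
    }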

@@ -116,7 +116,7 @@ struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
template <typename OutputType>
static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) {
- assert(false && "Should only be called on floats");
+ eigen_assert(false && "Should only be called on floats");
}
static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output) {
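
This hunk swaps a raw assert for eigen_assert, Eigen's own assertion macro: it defaults to assert, is compiled out under EIGEN_NO_DEBUG/NDEBUG, and can be redirected by defining eigen_assert before including any Eigen header. A minimal host-side sketch of that override hook (simplified, and not Eigen's real definition):

    #include <cstdio>
    #include <cstdlib>

    // A project can install its own handler by defining eigen_assert before
    // including any Eigen header; otherwise Eigen falls back to assert().
    #define eigen_assert(x)                                              \
      do {                                                               \
        if (!(x)) {                                                      \
          std::fprintf(stderr, "Eigen assertion failed: %s\n", #x);      \
          std::abort();                                                  \
        }                                                                \
      } while (0)

    // Code like the hunk above then routes through the custom handler
    // instead of the C library's assert:
    void should_only_be_called_on_floats() {
      eigen_assert(false && "Should only be called on floats");
    }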