Updates corresponding to the latest round of PR feedback

The major changes are:

1. Moving CUDA/PacketMath.h to GPU/PacketMath.h
2. Moving CUDA/MathFunctions.h to GPU/MathFunctions.h
3. Moving CUDA/CudaSpecialFunctions.h to GPU/GpuSpecialFunctions.h
    The above three changes effectively enable the Eigen "Packet" layer for the HIP platform.

4. Merging the "hip_basic" and "cuda_basic" unit tests into one ("gpu_basic")
5. Updating the "EIGEN_DEVICE_FUNC" marking in some places

The change has been tested on the HIP and CUDA platforms.
Deven Desai 2018-07-11 10:39:54 -04:00
parent 1fe0b74904
commit 876f392c39
18 changed files with 158 additions and 285 deletions
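For context, the first three changes route both back ends through a single vectorization guard. A minimal sketch, condensed from the Core header hunks below (not the verbatim source), of how the shared GPU packet layer is now selected:

    // Both compilers now feed the same EIGEN_VECTORIZE_GPU switch ...
    #if defined(EIGEN_CUDACC)
    #define EIGEN_VECTORIZE_GPU
    #include <vector_types.h>
    #endif
    #if defined(EIGEN_HIPCC) && defined(EIGEN_HIP_DEVICE_COMPILE)
    #define EIGEN_VECTORIZE_GPU
    #include <hip/hip_vector_types.h>
    #endif

    // ... and the renamed packet layer is pulled in once, for either platform:
    #if defined(EIGEN_VECTORIZE_GPU)
    #include "src/Core/arch/GPU/PacketMath.h"
    #include "src/Core/arch/GPU/MathFunctions.h"
    #endif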

View File

@@ -81,6 +81,7 @@
// clang++ always considers constexpr functions as implicitly __host__ __device__
#define EIGEN_CONSTEXPR_ARE_DEVICE_FUNC
#endif
#endif
#elif defined(EIGEN_HIPCC)
// Do not try to vectorize on HIP
@@ -92,7 +93,7 @@
// We need hip_runtime.h to ensure that the EIGEN_USING_STD_MATH macro
// works properly on the device side
#include <hip/hip_runtime.h>
#if defined(__HIP_DEVICE_COMPILE__) && !defined(EIGEN_NO_HIP)
// analogous to EIGEN_CUDA_ARCH, but for HIP
#define EIGEN_HIP_DEVICE_COMPILE __HIP_DEVICE_COMPILE__
@@ -356,7 +357,7 @@
#endif
#if defined EIGEN_CUDACC
#define EIGEN_VECTORIZE_CUDA
#define EIGEN_VECTORIZE_GPU
#include <vector_types.h>
#if EIGEN_CUDACC_VER >= 70500
#define EIGEN_HAS_CUDA_FP16
@@ -369,14 +370,20 @@
#endif
#if defined(EIGEN_HIPCC) && defined(EIGEN_HIP_DEVICE_COMPILE)
#define EIGEN_VECTORIZE_GPU
#include <hip/hip_vector_types.h>
#define EIGEN_HAS_HIP_FP16
#include <hip/hip_fp16.h>
#define HIP_PATCH_WITH_NEW_FP16 18215
#if (HIP_VERSION_PATCH < HIP_PATCH_WITH_NEW_FP16)
#define EIGEN_HAS_OLD_HIP_FP16
// Old HIP implementation does not have an explicit typedef for "half2"
typedef __half2 half2;
#endif
#endif
#if defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)
@@ -550,9 +557,9 @@ using std::ptrdiff_t;
#include "src/Core/arch/GPU/PacketMathHalf.h"
#include "src/Core/arch/GPU/TypeCasting.h"
#if defined EIGEN_VECTORIZE_CUDA
#include "src/Core/arch/CUDA/PacketMath.h"
#include "src/Core/arch/CUDA/MathFunctions.h"
#if defined EIGEN_VECTORIZE_GPU
#include "src/Core/arch/GPU/PacketMath.h"
#include "src/Core/arch/GPU/MathFunctions.h"
#endif
#include "src/Core/arch/Default/Settings.h"

View File

@@ -982,7 +982,12 @@ template<>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE long double mini(const long double& x, const long double& y)
{
#if defined(EIGEN_HIPCC)
// no "fminl" on HIP yet
return (x < y) ? x : y;
#else
return fminl(x, y);
#endif
}
template<typename T>
@@ -1007,7 +1012,12 @@ template<>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE long double maxi(const long double& x, const long double& y)
{
#if defined(EIGEN_HIPCC)
// no "fmaxl" on HIP yet
return (x > y) ? x : y;
#else
return fmaxl(x, y);
#endif
}
#endif
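The guard idiom above generalizes to any long double libm entry point that is not yet available on HIP. A minimal sketch, assuming (hypothetically) that fabsl were also missing on the device; absl_sketch is an illustrative name, not an Eigen function:

    EIGEN_DEVICE_FUNC
    EIGEN_ALWAYS_INLINE long double absl_sketch(const long double& x)
    {
    #if defined(EIGEN_HIPCC)
      // hypothetical manual fallback, mirroring the mini/maxi workaround above
      return (x < 0.0L) ? -x : x;
    #else
      return fabsl(x);
    #endif
    }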

View File

@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATH_FUNCTIONS_CUDA_H
#define EIGEN_MATH_FUNCTIONS_CUDA_H
#ifndef EIGEN_MATH_FUNCTIONS_GPU_H
#define EIGEN_MATH_FUNCTIONS_GPU_H
namespace Eigen {
@@ -17,7 +17,7 @@ namespace internal {
// Make sure this is only available when targeting a GPU: we don't want to
// introduce conflicts between these packet_traits definitions and the ones
// we'll use on the host side (SSE, AVX, ...)
#if defined(EIGEN_CUDACC) && defined(EIGEN_USE_GPU)
#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
float4 plog<float4>(const float4& a)
{
@@ -100,4 +100,4 @@ double2 prsqrt<double2>(const double2& a)
} // end namespace Eigen
#endif // EIGEN_MATH_FUNCTIONS_CUDA_H
#endif // EIGEN_MATH_FUNCTIONS_GPU_H

View File

@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PACKET_MATH_CUDA_H
#define EIGEN_PACKET_MATH_CUDA_H
#ifndef EIGEN_PACKET_MATH_GPU_H
#define EIGEN_PACKET_MATH_GPU_H
namespace Eigen {
@@ -17,7 +17,7 @@ namespace internal {
// Make sure this is only available when targeting a GPU: we don't want to
// introduce conflicts between these packet_traits definitions and the ones
// we'll use on the host side (SSE, AVX, ...)
#if defined(EIGEN_CUDACC) && defined(EIGEN_USE_GPU)
#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
template<> struct is_arithmetic<float4> { enum { value = true }; };
template<> struct is_arithmetic<double2> { enum { value = true }; };
@@ -338,4 +338,4 @@ ptranspose(PacketBlock<double2,2>& kernel) {
} // end namespace Eigen
#endif // EIGEN_PACKET_MATH_CUDA_H
#endif // EIGEN_PACKET_MATH_GPU_H

View File

@@ -48,7 +48,7 @@ typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
EIGEN_DONT_INLINE static void run(
EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
@@ -57,7 +57,7 @@ EIGEN_DONT_INLINE static void run(
};
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
Index rows, Index cols,
const LhsMapper& alhs,
const RhsMapper& rhs,
@@ -231,7 +231,7 @@ typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
EIGEN_DONT_INLINE static void run(
EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
@@ -240,7 +240,7 @@ EIGEN_DONT_INLINE static void run(
};
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
Index rows, Index cols,
const LhsMapper& alhs,
const RhsMapper& rhs,

View File

@@ -399,7 +399,7 @@ if(CUDA_FOUND)
cuda_include_directories(${CMAKE_CURRENT_BINARY_DIR})
set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
ei_add_test(cuda_basic)
ei_add_test(gpu_basic)
unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
@@ -429,7 +429,7 @@ if (EIGEN_TEST_HIP)
include_directories(${HIP_PATH}/include)
set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
ei_add_test(hip_basic)
ei_add_test(gpu_basic)
unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
elseif (${HIP_PLATFORM} STREQUAL "nvcc")

View File

@@ -15,13 +15,11 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cuda_basic
#define EIGEN_TEST_FUNC gpu_basic
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#include <math_constants.h>
#include <cuda.h>
#include "main.h"
#include "cuda_common.h"
#include "gpu_common.h"
// Check that dense modules can be properly parsed by nvcc
#include <Eigen/Dense>
@@ -164,40 +162,51 @@ struct matrix_inverse {
}
};
void test_cuda_basic()
void test_gpu_basic()
{
ei_test_init_cuda();
ei_test_init_gpu();
int nthreads = 100;
Eigen::VectorXf in, out;
#ifndef __CUDA_ARCH__
#if !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__)
int data_size = nthreads * 512;
in.setRandom(data_size);
out.setRandom(data_size);
#endif
CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Vector3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Array44f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(replicate<Array4f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(replicate<Array33f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(redux<Array4f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(redux<Matrix3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(prod_test<Matrix3f,Matrix3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(prod_test<Matrix4f,Vector4f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix3f,Vector3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise<Vector3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise<Array44f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(matrix_inverse<Matrix2f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(matrix_inverse<Matrix3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(matrix_inverse<Matrix4f>(), nthreads, in, out) );
#if !defined(EIGEN_USE_HIP)
// FIXME
// These subtests result in a compile failure on the HIP platform
//
// eigen-upstream/Eigen/src/Core/Replicate.h:61:65: error:
// base class 'internal::dense_xpr_base<Replicate<Array<float, 4, 1, 0, 4, 1>, -1, -1> >::type'
// (aka 'ArrayBase<Eigen::Replicate<Eigen::Array<float, 4, 1, 0, 4, 1>, -1, -1> >') has protected default constructor
CALL_SUBTEST( run_and_compare_to_gpu(replicate<Array4f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(replicate<Array33f>(), nthreads, in, out) );
#endif
CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues_direct<Matrix3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues_direct<Matrix2f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix4f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(redux<Array4f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(redux<Matrix3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(prod_test<Matrix3f,Matrix3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(prod_test<Matrix4f,Vector4f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(diagonal<Matrix3f,Vector3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix2f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix4f>(), nthreads, in, out) );
#if !defined(EIGEN_USE_HIP)
// FIXME
// These subtests result in a linking error on the HIP platform
CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct<Matrix3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct<Matrix2f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues<Matrix4f>(), nthreads, in, out) );
#endif
}

View File

@@ -1,13 +1,22 @@
#ifndef EIGEN_TEST_CUDA_COMMON_H
#define EIGEN_TEST_CUDA_COMMON_H
#ifndef EIGEN_TEST_GPU_COMMON_H
#define EIGEN_TEST_GPU_COMMON_H
#ifdef EIGEN_USE_HIP
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#else
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#endif
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <iostream>
#ifndef __CUDACC__
#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
#if !defined(__CUDACC__) && !defined(__HIPCC__)
dim3 threadIdx, blockDim, blockIdx;
#endif
@@ -21,7 +30,7 @@ void run_on_cpu(const Kernel& ker, int n, const Input& in, Output& out)
template<typename Kernel, typename Input, typename Output>
__global__
void run_on_cuda_meta_kernel(const Kernel ker, int n, const Input* in, Output* out)
void run_on_gpu_meta_kernel(const Kernel ker, int n, const Input* in, Output* out)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i<n) {
@@ -31,61 +40,70 @@ void run_on_cuda_meta_kernel(const Kernel ker, int n, const Input* in, Output* o
template<typename Kernel, typename Input, typename Output>
void run_on_cuda(const Kernel& ker, int n, const Input& in, Output& out)
void run_on_gpu(const Kernel& ker, int n, const Input& in, Output& out)
{
typename Input::Scalar* d_in;
typename Output::Scalar* d_out;
std::ptrdiff_t in_bytes = in.size() * sizeof(typename Input::Scalar);
std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar);
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out), out_bytes);
gpuMalloc((void**)(&d_in), in_bytes);
gpuMalloc((void**)(&d_out), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_out, out.data(), out_bytes, cudaMemcpyHostToDevice);
gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice);
gpuMemcpy(d_out, out.data(), out_bytes, gpuMemcpyHostToDevice);
// Simple and non-optimal 1D mapping assuming n is not too large
// That's only for unit testing!
dim3 Blocks(128);
dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) );
cudaThreadSynchronize();
run_on_cuda_meta_kernel<<<Grids,Blocks>>>(ker, n, d_in, d_out);
cudaThreadSynchronize();
gpuDeviceSynchronize();
#ifdef EIGEN_USE_HIP
hipLaunchKernelGGL(run_on_gpu_meta_kernel<Kernel,
typename std::decay<decltype(*d_in)>::type,
typename std::decay<decltype(*d_out)>::type>,
dim3(Grids), dim3(Blocks), 0, 0, ker, n, d_in, d_out);
#else
run_on_gpu_meta_kernel<<<Grids,Blocks>>>(ker, n, d_in, d_out);
#endif
gpuDeviceSynchronize();
// check inputs have not been modified
cudaMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost);
gpuMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, gpuMemcpyDeviceToHost);
gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
gpuFree(d_in);
gpuFree(d_out);
}
template<typename Kernel, typename Input, typename Output>
void run_and_compare_to_cuda(const Kernel& ker, int n, const Input& in, Output& out)
void run_and_compare_to_gpu(const Kernel& ker, int n, const Input& in, Output& out)
{
Input in_ref, in_cuda;
Output out_ref, out_cuda;
#ifndef __CUDA_ARCH__
in_ref = in_cuda = in;
out_ref = out_cuda = out;
Input in_ref, in_gpu;
Output out_ref, out_gpu;
#if !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__)
in_ref = in_gpu = in;
out_ref = out_gpu = out;
#endif
run_on_cpu (ker, n, in_ref, out_ref);
run_on_cuda(ker, n, in_cuda, out_cuda);
#ifndef __CUDA_ARCH__
VERIFY_IS_APPROX(in_ref, in_cuda);
VERIFY_IS_APPROX(out_ref, out_cuda);
run_on_gpu(ker, n, in_gpu, out_gpu);
#if !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__)
VERIFY_IS_APPROX(in_ref, in_gpu);
VERIFY_IS_APPROX(out_ref, out_gpu);
#endif
}
void ei_test_init_cuda()
void ei_test_init_gpu()
{
int device = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
std::cout << "CUDA device info:\n";
gpuDeviceProp_t deviceProp;
gpuGetDeviceProperties(&deviceProp, device);
std::cout << "GPU device info:\n";
std::cout << " name: " << deviceProp.name << "\n";
std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n";
std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n";
@@ -98,4 +116,4 @@ void ei_test_init_cuda()
std::cout << " computeMode: " << deviceProp.computeMode << "\n";
}
#endif // EIGEN_TEST_CUDA_COMMON_H
#endif // EIGEN_TEST_GPU_COMMON_H
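run_and_compare_to_gpu and run_on_gpu above hide the one real divergence between the two back ends: kernel-launch syntax. A minimal standalone sketch of that dual-launch idiom; the kernel name and launch geometry here are illustrative, not taken from the test:

    // Toy kernel: add 1 to each of the first n elements.
    template<typename T>
    __global__ void increment_kernel(T* data, int n) {
      int i = threadIdx.x + blockIdx.x * blockDim.x;
      if (i < n) data[i] += T(1);
    }

    template<typename T>
    void launch_increment(T* d_data, int n) {
      dim3 Blocks(128);
      dim3 Grids((n + int(Blocks.x) - 1) / int(Blocks.x));
    #ifdef EIGEN_USE_HIP
      // HIP requires the explicit launch wrapper in place of the <<<...>>> syntax
      hipLaunchKernelGGL(increment_kernel<T>, Grids, Blocks, 0, 0, d_data, n);
    #else
      increment_kernel<T><<<Grids, Blocks>>>(d_data, n);
    #endif
      gpuDeviceSynchronize();
    }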

View File

@@ -28,6 +28,20 @@ class TensorContractionBlocking {
typedef typename LhsMapper::Scalar LhsScalar;
typedef typename RhsMapper::Scalar RhsScalar;
/*
adding EIGEN_DEVICE_FUNC unconditionally to the `TensorContractionBlocking` constructor in `TensorContractionBlocking.h`
requires adding EIGEN_DEVICE_FUNC to `computeProductBlockingSizes` in `GeneralBlockPanelKernel.h`,
which, in turn, requires adding EIGEN_DEVICE_FUNC to `evaluateProductBlockingSizesHeuristic` in `GeneralBlockPanelKernel.h`,
which, in turn, requires adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h`
(else HIPCC will error out).
However, adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h`
results in NVCC erroring out with the following error:
../Eigen/src/Core/products/GeneralBlockPanelKernel.h(57): error #2901:
dynamic initialization is not supported for function-scope static variables within a __device__/__global__ function
*/
#if !defined(EIGEN_HIPCC)
EIGEN_DEVICE_FUNC
#endif
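The NVCC failure mode that forces this guard is easy to reproduce in isolation. A minimal illustrative sketch (hypothetical names, not Eigen code) of the function-scope-static problem quoted in the comment above:

    #include <cstddef>

    struct CacheSizes { std::ptrdiff_t l1, l2, l3; };

    __host__ __device__ inline CacheSizes query_cache_sizes() {
      // stand-in for a runtime cache-size query; not a constant expression
      return CacheSizes{16 * 1024, 512 * 1024, 8 * 1024 * 1024};
    }

    __host__ __device__ inline void manage_caching_sizes_sketch() {
      // NVCC rejects the next line when compiling the device path:
      //   error #2901: dynamic initialization is not supported for
      //   function-scope static variables within a __device__/__global__ function
      static CacheSizes sizes = query_cache_sizes();
      (void)sizes;
    }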

View File

@@ -546,45 +546,6 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
results[i].x = results[i].y = results[i].z = results[i].w = 0;
}
#if defined(EIGEN_HIPCC)
#define prefetch_lhs(reg, row, col) \
if (!CHECK_LHS_BOUNDARY) { \
if (col < k_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
reg.z =lhs(row + 2, col); \
reg.w =lhs(row + 3, col); \
} \
} else { \
if (col < k_size) { \
if (row + 3 < m_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
reg.z =lhs(row + 2, col); \
reg.w =lhs(row + 3, col); \
} else if (row + 2 < m_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
reg.z =lhs(row + 2, col); \
} else if (row + 1 < m_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
} else if (row < m_size) { \
reg.x =lhs(row + 0, col); \
} \
} \
} \
#define prefetch_rhs_hipcc(reg, row, col) \
reg.x =rhs(row + 0, col); \
reg.y =rhs(row + 1, col); \
reg.z =rhs(row + 2, col); \
reg.w =rhs(row + 3, col); \
#else
#define prefetch_lhs(reg, row, col) \
if (!CHECK_LHS_BOUNDARY) { \
if (col < k_size) { \
@@ -607,19 +568,12 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
} \
} \
#endif
Index lhs_vert = base_m+threadIdx.x*4;
for (Index k = 0; k < k_size; k += 16) {
#if defined(EIGEN_HIPCC)
lhs_pf0 = make_float4(0, 0, 0, 0);
rhs_pf0 = make_float4(0, 0, 0, 0);
#else
lhs_pf0 = internal::pset1<float4>(0);
rhs_pf0 = internal::pset1<float4>(0);
#endif
Index lhs_horiz = threadIdx.y+k;
prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz)
@@ -630,11 +584,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
#if defined(EIGEN_HIPCC)
prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
#endif
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -649,11 +599,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
} else {
if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
#if defined(EIGEN_HIPCC)
prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
#endif
} else if ((rhs_vert + 2) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
@@ -753,10 +699,6 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
#undef prefetch_lhs
#undef add_vals
#if defined(EIGEN_HIPCC)
#undef prefetch_rhs_hipcc
#endif
Index horiz_base = threadIdx.y*4+base_n;
if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
for (int i = 0; i < 4; i++) {
@@ -845,33 +787,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
results[i].x = results[i].y = results[i].z = results[i].w = 0;
}
#if defined(EIGEN_HIPCC)
#define prefetch_lhs_hipcc(reg, row, col) \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
reg.z =lhs(row + 2, col); \
reg.w =lhs(row + 3, col);
#define prefetch_rhs_hipcc(reg, row, col) \
reg.x =rhs(row + 0, col); \
reg.y =rhs(row + 1, col); \
reg.z =rhs(row + 2, col); \
reg.w =rhs(row + 3, col);
#endif
Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32;
for (Index k = 0; k < k_size; k += 32) {
#if defined(EIGEN_HIPCC)
lhs_pf0 = make_float4(0, 0, 0, 0);
lhs_pf1 = make_float4(0, 0, 0, 0);
lhs_pf2 = make_float4(0, 0, 0, 0);
lhs_pf3 = make_float4(0, 0, 0, 0);
rhs_pf0 = make_float4(0, 0, 0, 0);
rhs_pf1 = make_float4(0, 0, 0, 0);
#else
lhs_pf0 = internal::pset1<float4>(0);
lhs_pf1 = internal::pset1<float4>(0);
lhs_pf2 = internal::pset1<float4>(0);
@@ -879,85 +796,40 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
rhs_pf0 = internal::pset1<float4>(0);
rhs_pf1 = internal::pset1<float4>(0);
#endif
if (!CHECK_LHS_BOUNDARY) {
if ((threadIdx.y/4+k+24) < k_size) {
#if defined(EIGEN_HIPCC)
prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
prefetch_lhs_hipcc(lhs_pf3, lhs_vert, (threadIdx.y/4+k+24))
#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
lhs_pf3 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
#endif
} else if ((threadIdx.y/4+k+16) < k_size) {
#if defined(EIGEN_HIPCC)
prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
#endif
} else if ((threadIdx.y/4+k+8) < k_size) {
#if defined(EIGEN_HIPCC)
prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
#endif
} else if ((threadIdx.y/4+k) < k_size) {
#if defined(EIGEN_HIPCC)
prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
#endif
}
} else {
// just CHECK_LHS_BOUNDARY
if (lhs_vert + 3 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
#if defined(EIGEN_HIPCC)
prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
prefetch_lhs_hipcc(lhs_pf3, lhs_vert, (threadIdx.y/4+k+24))
#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
lhs_pf3 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
#endif
} else if ((threadIdx.y/4+k+16) < k_size) {
#if defined(EIGEN_HIPCC)
prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
#endif
} else if ((threadIdx.y/4+k+8) < k_size) {
#if defined(EIGEN_HIPCC)
prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
#endif
} else if ((threadIdx.y/4+k) < k_size) {
#if defined(EIGEN_HIPCC)
prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
#endif
}
} else if (lhs_vert + 2 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
@@ -1046,13 +918,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
#if defined(EIGEN_HIPCC)
prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
prefetch_rhs_hipcc(rhs_pf1, rhs_vert, rhs_horiz1)
#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
rhs_pf1 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
#endif
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -1074,13 +941,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (rhs_horiz1 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
#if defined(EIGEN_HIPCC)
prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
prefetch_rhs_hipcc(rhs_pf1, rhs_vert, rhs_horiz1)
#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
rhs_pf1 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
#endif
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -1101,11 +963,7 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
} else if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
#if defined(EIGEN_HIPCC)
prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
#endif
} else if ((rhs_vert + 2) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -1213,11 +1071,6 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
__syncthreads();
} // end loop over k
#if defined(EIGEN_HIPCC)
#undef prefetch_lhs_hipcc
#undef prefetch_rhs_hipcc
#endif
__syncthreads();
Index horiz_base = (threadIdx.y/4)*8+base_n;
if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {

View File

@@ -32,8 +32,7 @@
#define gpuGetDeviceCount hipGetDeviceCount
#define gpuGetErrorString hipGetErrorString
#define gpuGetDeviceProperties hipGetDeviceProperties
// FIXME : use hipStreamDefault instead of 0x00
#define gpuStreamDefault 0x00
#define gpuStreamDefault hipStreamDefault
#define gpuGetDevice hipGetDevice
#define gpuSetDevice hipSetDevice
#define gpuMalloc hipMalloc
@@ -47,6 +46,7 @@
#define gpuSharedMemConfig hipSharedMemConfig
#define gpuDeviceSetSharedMemConfig hipDeviceSetSharedMemConfig
#define gpuStreamSynchronize hipStreamSynchronize
#define gpuDeviceSynchronize hipDeviceSynchronize
#define gpuMemcpy hipMemcpy
#else
@@ -73,6 +73,7 @@
#define gpuSharedMemConfig cudaSharedMemConfig
#define gpuDeviceSetSharedMemConfig cudaDeviceSetSharedMemConfig
#define gpuStreamSynchronize cudaStreamSynchronize
#define gpuDeviceSynchronize cudaDeviceSynchronize
#define gpuMemcpy cudaMemcpy
#endif
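With gpuDeviceSynchronize aliased on both sides, call sites can be written once against the gpu* layer and compile for either back end. A minimal sketch, assuming the gpuMemcpyHostToDevice/gpuMemcpyDeviceToHost aliases from this same header are in scope:

    // Round-trip a host buffer through device memory using only gpu* aliases.
    void roundtrip_through_device(const float* host_in, float* host_out, int n) {
      float* d_buf = 0;
      gpuMalloc((void**)(&d_buf), n * sizeof(float));
      gpuMemcpy(d_buf, host_in, n * sizeof(float), gpuMemcpyHostToDevice);
      gpuDeviceSynchronize();  // resolves to hipDeviceSynchronize or cudaDeviceSynchronize
      gpuMemcpy(host_out, d_buf, n * sizeof(float), gpuMemcpyDeviceToHost);
      gpuFree(d_buf);
    }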

View File

@@ -32,6 +32,7 @@
#undef gpuSharedMemConfig
#undef gpuDeviceSetSharedMemConfig
#undef gpuStreamSynchronize
#undef gpuDeviceSynchronize
#undef gpuMemcpy
#undef EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H

View File

@@ -351,10 +351,7 @@ struct IndexPairList : internal::IndexTuple<FirstType, OtherTypes...> {
namespace internal {
template<typename FirstType, typename... OtherTypes>
#if defined(EIGEN_HIPCC)
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
#endif
size_t array_prod(const IndexList<FirstType, OtherTypes...>& sizes) {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t array_prod(const IndexList<FirstType, OtherTypes...>& sizes) {
size_t result = 1;
for (int i = 0; i < array_size<IndexList<FirstType, OtherTypes...> >::value; ++i) {
result *= sizes[i];

View File

@@ -859,10 +859,7 @@ struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices,
return inputIndex;
}
#if defined(EIGEN_HIPCC)
EIGEN_DEVICE_FUNC
#endif
static EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
#ifndef __SYCL_DEVICE_ONLY__
return numext::maxi(min, numext::mini(max,value));
#else

View File

@@ -497,6 +497,9 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
EIGEN_STRONG_INLINE
#if !defined(EIGEN_HIPCC)
// Marking this as EIGEN_DEVICE_FUNC for HIPCC requires also doing the same for all the functions
// being called within here, which then leads to proliferation of EIGEN_DEVICE_FUNC markings, one
// of which will eventually result in an NVCC error
EIGEN_DEVICE_FUNC
#endif
bool evalSubExprsIfNeeded(typename MakePointer_<CoeffReturnType>::Type data) {
@@ -778,17 +781,9 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
// Indexed by reduced dimensions.
array<Index, NumReducedDims> m_reducedDims;
#if defined(EIGEN_HIPCC)
public:
#endif
// Evaluator for the input expression.
TensorEvaluator<ArgType, Device> m_impl;
#if defined(EIGEN_HIPCC)
private:
#endif
// Operation to apply for computing the reduction.
Op m_reducer;

View File

@@ -169,7 +169,9 @@ __global__ void FullReductionKernel(Reducer reducer, const Self input, Index num
#pragma unroll
for (int offset = warpSize/2; offset > 0; offset /= 2) {
#if defined(EIGEN_HIPCC)
// XXX use std::is_floating_point to determine the type of accum
// use std::is_floating_point to determine the type of accum
// This is needed because when Type == double, hipcc will give a "call to __shfl_down is ambiguous" error
// and list the float and int versions of __shfl_down as the candidate functions.
if (std::is_floating_point<typename Self::CoeffReturnType>::value) {
reducer.reduce(__shfl_down(static_cast<float>(accum), offset, warpSize), &accum);
} else {
@@ -238,20 +240,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
// Initialize the output value if it wasn't initialized by the ReductionInitKernel
#if defined(EIGEN_HIPCC)
if (gridDim.x == 1 && first_index == 0) {
if (num_coeffs % 2 != 0) {
half last = input.m_impl.coeff(num_coeffs-1);
*scratch = __halves2half2(last, reducer.initialize());
} else {
*scratch = reducer.template initializePacket<half2>();
}
__syncthreads();
}
#else
if (gridDim.x == 1) {
if (first_index == 0) {
if (num_coeffs % 2 != 0) {
@@ -264,8 +252,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
__syncthreads();
}
#endif
half2 accum = reducer.template initializePacket<half2>();
const Index max_iter = numext::mini<Index>((num_coeffs - first_index) / 2, NumPerThread*BlockSize / 2);
for (Index i = 0; i < max_iter; i += BlockSize) {
@@ -295,17 +281,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
atomicReduce(scratch, accum, reducer);
}
#if defined(EIGEN_HIPCC)
__syncthreads();
if (gridDim.x == 1 && first_index == 0) {
half tmp = __low2half(*scratch);
reducer.reduce(__high2half(*scratch), &tmp);
*output = tmp;
}
#else
if (gridDim.x == 1) {
__syncthreads();
if (first_index == 0) {
@@ -314,8 +289,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
*output = tmp;
}
}
#endif
}
template <typename Op>
@@ -485,7 +458,9 @@ __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index nu
#pragma unroll
for (int offset = warpSize/2; offset > 0; offset /= 2) {
#if defined(EIGEN_HIPCC)
// XXX use std::is_floating_point to determine the type of reduced_val
// use std::is_floating_point to determine the type of reduced_val
// This is needed because when Type == double, hipcc will give a "call to __shfl_down is ambiguous" error
// and list the float and int versions of __shfl_down as the candidate functions.
if (std::is_floating_point<Type>::value) {
reducer.reduce(__shfl_down(static_cast<float>(reduced_val), offset), &reduced_val);
} else {
@@ -847,8 +822,4 @@ struct OuterReducer<Self, Op, GpuDevice> {
} // end namespace internal
} // end namespace Eigen
#if defined(EIGEN_HIPCC)
#undef warpSize
#endif
#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H

View File

@@ -53,8 +53,8 @@ namespace Eigen {
#include "src/SpecialFunctions/SpecialFunctionsFunctors.h"
#include "src/SpecialFunctions/SpecialFunctionsArrayAPI.h"
#if defined EIGEN_VECTORIZE_CUDA
#include "src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h"
#if defined EIGEN_VECTORIZE_GPU
#include "src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h"
#endif
namespace Eigen {

View File

@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CUDA_SPECIALFUNCTIONS_H
#define EIGEN_CUDA_SPECIALFUNCTIONS_H
#ifndef EIGEN_GPU_SPECIALFUNCTIONS_H
#define EIGEN_GPU_SPECIALFUNCTIONS_H
namespace Eigen {
@@ -223,4 +223,4 @@ pi1e<double2>(const double2& x) {
} // end namespace Eigen
#endif // EIGEN_CUDA_SPECIALFUNCTIONS_H
#endif // EIGEN_GPU_SPECIALFUNCTIONS_H