Mirror of https://gitlab.com/libeigen/eigen.git
Restore Tensor support for non-C++11 compilers
commit c07404f6a1 (parent ba32ded021)
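Background for the change below: before C++11, two consecutive closing angle brackets in a nested template-argument list were lexed as the right-shift operator ">>", so "Foo<Bar<T>>" had to be written "Foo<Bar<T> >". A minimal standalone sketch of the rule (an illustration, not part of this commit):

    #include <vector>

    int main() {
      // Under -std=c++98 the following declaration is a syntax error,
      // because ">>" is tokenized as the shift operator:
      //   std::vector<std::vector<int>> nested;
      // Splitting the brackets with a space is valid in every standard:
      std::vector<std::vector<int> > nested;
      nested.push_back(std::vector<int>(3, 42));
      return nested[0][0] == 42 ? 0 : 1;
    }

The hunks below apply exactly this spacing fix throughout the Tensor cost model and the argmax/argmin tests.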
@@ -34,25 +34,25 @@ class TensorOpCost {
   template <typename ArgType>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int MulCost() {
     return internal::functor_traits<
-        internal::scalar_product_op<ArgType, ArgType>>::Cost;
+        internal::scalar_product_op<ArgType, ArgType> >::Cost;
   }
   template <typename ArgType>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int AddCost() {
-    return internal::functor_traits<internal::scalar_sum_op<ArgType>>::Cost;
+    return internal::functor_traits<internal::scalar_sum_op<ArgType> >::Cost;
   }
   template <typename ArgType>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int DivCost() {
     return internal::functor_traits<
-        internal::scalar_quotient_op<ArgType, ArgType>>::Cost;
+        internal::scalar_quotient_op<ArgType, ArgType> >::Cost;
   }
   template <typename ArgType>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int ModCost() {
-    return internal::functor_traits<internal::scalar_mod_op<ArgType>>::Cost;
+    return internal::functor_traits<internal::scalar_mod_op<ArgType> >::Cost;
   }
   template <typename SrcType, typename TargetType>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int CastCost() {
     return internal::functor_traits<
-        internal::scalar_cast_op<SrcType, TargetType>>::Cost;
+        internal::scalar_cast_op<SrcType, TargetType> >::Cost;
   }

   TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {}
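The MulCost/AddCost/DivCost helpers touched above expose the per-scalar cycle estimates that Eigen's tensor cost model pulls from internal::functor_traits. A hedged usage sketch (the unsupported Tensor header path is an assumption; the exact layout can differ between Eigen versions):

    #include <iostream>
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      using Eigen::TensorOpCost;
      // Query the modeled costs for float arithmetic; the tensor
      // evaluators consult these numbers when deciding how to block
      // and parallelize work.
      std::cout << "mul: " << TensorOpCost::MulCost<float>() << "\n"
                << "add: " << TensorOpCost::AddCost<float>() << "\n"
                << "div: " << TensorOpCost::DivCost<float>() << "\n";
      return 0;
    }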
@@ -64,7 +64,7 @@ static void test_argmax_tuple_reducer()
   Tensor<Tuple<DenseIndex, float>, 0, DataLayout> reduced;
   DimensionList<DenseIndex, 4> dims;
   reduced = index_tuples.reduce(
-      dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float>>());
+      dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float> >());

   Tensor<float, 0, DataLayout> maxi = tensor.maximum();
@@ -74,7 +74,7 @@ static void test_argmax_tuple_reducer()
   for (int d = 0; d < 3; ++d) reduce_dims[d] = d;
   Tensor<Tuple<DenseIndex, float>, 1, DataLayout> reduced_by_dims(7);
   reduced_by_dims = index_tuples.reduce(
-      reduce_dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float>>());
+      reduce_dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float> >());

   Tensor<float, 1, DataLayout> max_by_dims = tensor.maximum(reduce_dims);
@@ -96,7 +96,7 @@ static void test_argmin_tuple_reducer()
   Tensor<Tuple<DenseIndex, float>, 0, DataLayout> reduced;
   DimensionList<DenseIndex, 4> dims;
   reduced = index_tuples.reduce(
-      dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float>>());
+      dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float> >());

   Tensor<float, 0, DataLayout> mini = tensor.minimum();
@@ -106,7 +106,7 @@ static void test_argmin_tuple_reducer()
   for (int d = 0; d < 3; ++d) reduce_dims[d] = d;
   Tensor<Tuple<DenseIndex, float>, 1, DataLayout> reduced_by_dims(7);
   reduced_by_dims = index_tuples.reduce(
-      reduce_dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float>>());
+      reduce_dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float> >());

   Tensor<float, 1, DataLayout> min_by_dims = tensor.minimum(reduce_dims);
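The four test hunks all exercise the same index-tuple reduction pattern with the pre-C++11 "> >" spelling. A condensed sketch of that pattern, based on the test code above (the header path and the rank-0 access syntax are assumptions that may vary by Eigen version):

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      using namespace Eigen;
      Tensor<float, 2> t(3, 4);
      t.setRandom();
      // Pair each element with its index, then keep the tuple whose
      // value component is largest across both dimensions.
      Tensor<Tuple<DenseIndex, float>, 0> best =
          t.index_tuples().reduce(
              DimensionList<DenseIndex, 2>(),
              internal::ArgMaxTupleReducer<Tuple<DenseIndex, float> >());
      Tensor<float, 0> maxi = t.maximum();
      return best().second == maxi() ? 0 : 1;
    }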