Fix several integer conversion and sign-compare warnings

Christoph Hertzberg 2018-08-24 22:58:55 +02:00
parent 949b0ad9cb
commit f7675b826b
6 changed files with 80 additions and 81 deletions
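For context, a minimal sketch of the two warning classes this commit removes; the function names and values below are invented for illustration and are not Eigen API. Compiled with g++ -c -Wsign-compare -Wconversion, the first loop and the int assignment both warn:

#include <cstddef>

using Index = std::ptrdiff_t;  // Eigen's default signed index type

Index warns(std::size_t n_unsigned, Index bound) {
  Index sum = 0;
  for (std::size_t i = 0; i < bound; ++i)  // -Wsign-compare: size_t vs. Index
    sum += static_cast<Index>(i);
  int narrowed = n_unsigned;               // -Wconversion: size_t -> int
  return sum + narrowed;
}

// The fix applied throughout the commit: one signed index type everywhere,
// with explicit casts where a width or signedness change is intended.
Index fixed(std::size_t n_unsigned, Index bound) {
  Index sum = 0;
  for (Index i = 0; i < bound; ++i) sum += i;
  return sum + static_cast<Index>(n_unsigned);
}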

unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h

@@ -212,11 +212,11 @@ class TensorBlockIO {
num_size_one_inner_dims, NumDims - num_size_one_inner_dims - 1);
const StorageIndex block_dim_for_tensor_stride1_dim =
NumDims == 0 ? 1 : tensor_to_block_dim_map[tensor_stride1_dim];
- size_t block_inner_dim_size =
+ StorageIndex block_inner_dim_size =
NumDims == 0 ? 1
: block.block_sizes()[block_dim_for_tensor_stride1_dim];
- for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
- const int dim = cond<Layout>()(i, NumDims - i - 1);
+ for (Index i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
+ const Index dim = cond<Layout>()(i, NumDims - i - 1);
const StorageIndex block_stride =
block.block_strides()[tensor_to_block_dim_map[dim]];
if (block_inner_dim_size == block_stride &&
@@ -258,8 +258,8 @@ class TensorBlockIO {
// Initialize block iterator state. Squeeze away any dimension of size 1.
int num_squeezed_dims = 0;
- for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
- const int dim = cond<Layout>()(i + 1, NumDims - i - 2);
+ for (Index i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
+ const Index dim = cond<Layout>()(i + 1, NumDims - i - 2);
const StorageIndex size = block.block_sizes()[tensor_to_block_dim_map[dim]];
if (size == 1) {
continue;
@@ -626,7 +626,7 @@ class TensorBlockMapper {
const TensorBlockShapeType block_shape,
Index min_target_size)
: m_dimensions(dims),
- m_block_dim_sizes(BlockDimensions(dims, block_shape, min_target_size)) {
+ m_block_dim_sizes(BlockDimensions(dims, block_shape, internal::convert_index<StorageIndex>(min_target_size))) {
// Calculate block counts by dimension and total block count.
DSizes<StorageIndex, NumDims> block_count;
for (Index i = 0; i < block_count.rank(); ++i) {
@@ -717,8 +717,8 @@ class TensorBlockMapper {
private:
static Dimensions BlockDimensions(const Dimensions& tensor_dims,
const TensorBlockShapeType block_shape,
- Index min_target_size) {
- min_target_size = numext::maxi<Index>(1, min_target_size);
+ StorageIndex min_target_size) {
+ min_target_size = numext::maxi<StorageIndex>(1, min_target_size);
// If tensor fully fits into the target size, we'll treat it a single block.
Dimensions block_dim_sizes = tensor_dims;
@@ -735,16 +735,15 @@ class TensorBlockMapper {
if (block_shape == kUniformAllDims) {
// Tensor will not fit within 'min_target_size' budget: calculate tensor
// block dimension sizes based on "square" dimension size target.
- const size_t dim_size_target = static_cast<const size_t>(
+ const StorageIndex dim_size_target = internal::convert_index<StorageIndex>(
std::pow(static_cast<float>(min_target_size),
1.0f / static_cast<float>(block_dim_sizes.rank())));
- for (size_t i = 0; i < block_dim_sizes.rank(); ++i) {
+ for (Index i = 0; i < block_dim_sizes.rank(); ++i) {
// TODO(andydavis) Adjust the inner most 'block_dim_size' to make it
// a multiple of the packet size. Note that reducing
// 'block_dim_size' in this manner can increase the number of
// blocks, and so will amplify any per-block overhead.
- block_dim_sizes[i] = numext::mini(
- dim_size_target, static_cast<size_t>(tensor_dims[i]));
+ block_dim_sizes[i] = numext::mini(dim_size_target, tensor_dims[i]);
}
// Add any un-allocated coefficients to inner dimension(s).
StorageIndex total_size = block_dim_sizes.TotalSize();
@@ -781,7 +780,7 @@ class TensorBlockMapper {
eigen_assert(
block_dim_sizes.TotalSize() >=
- numext::mini<size_t>(min_target_size, tensor_dims.TotalSize()));
+ numext::mini<Index>(min_target_size, tensor_dims.TotalSize()));
return block_dim_sizes;
}
@@ -824,7 +823,7 @@ class TensorSliceBlockMapper {
m_total_block_count(1) {
// Calculate block counts by dimension and total block count.
DSizes<StorageIndex, NumDims> block_count;
- for (size_t i = 0; i < block_count.rank(); ++i) {
+ for (Index i = 0; i < block_count.rank(); ++i) {
block_count[i] = divup(m_tensor_slice_extents[i], m_block_dim_sizes[i]);
}
m_total_block_count = array_prod(block_count);
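The constructor change above routes min_target_size through internal::convert_index<StorageIndex> before it reaches BlockDimensions. As a rough sketch of the idea only (the real helper lives in Eigen's core utilities and additionally supports device code), such a checked narrowing cast looks like:

#include <cassert>

// Sketch, not the real implementation: the round-trip assert catches values
// that would not survive the narrowing before the cast truncates them.
template <typename IndexDest, typename IndexSrc>
IndexDest convert_index_sketch(const IndexSrc& idx) {
  assert(idx == static_cast<IndexSrc>(static_cast<IndexDest>(idx)));
  return static_cast<IndexDest>(idx);
}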

unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h

@@ -32,12 +32,12 @@ namespace Eigen {
// Boilerplate code
namespace internal {
- template<std::size_t n, typename Dimension> struct dget {
+ template<std::ptrdiff_t n, typename Dimension> struct dget {
static const std::ptrdiff_t value = get<n, Dimension>::value;
};
- template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+ template<typename Index, std::ptrdiff_t NumIndices, std::ptrdiff_t n, bool RowMajor>
struct fixed_size_tensor_index_linearization_helper
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
@@ -50,7 +50,7 @@ struct fixed_size_tensor_index_linearization_helper
}
};
- template<typename Index, std::size_t NumIndices, bool RowMajor>
+ template<typename Index, std::ptrdiff_t NumIndices, bool RowMajor>
struct fixed_size_tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
@@ -60,7 +60,7 @@ struct fixed_size_tensor_index_linearization_helper<Index, NumIndices, 0, RowMaj
}
};
- template<typename Index, std::size_t n>
+ template<typename Index, std::ptrdiff_t n>
struct fixed_size_tensor_index_extraction_helper
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
@@ -94,7 +94,7 @@ struct Sizes {
typedef internal::numeric_list<std::ptrdiff_t, Indices...> Base;
const Base t = Base();
static const std::ptrdiff_t total_size = internal::arg_prod(Indices...);
- static const size_t count = Base::count;
+ static const ptrdiff_t count = Base::count;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t rank() const {
return Base::count;
@@ -121,16 +121,16 @@ struct Sizes {
return *this;
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::size_t index) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::ptrdiff_t index) const {
return internal::fixed_size_tensor_index_extraction_helper<std::ptrdiff_t, Base::count>::run(index, t);
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
+ ptrdiff_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, t);
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
+ ptrdiff_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, t);
}
};
@@ -144,25 +144,25 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes<Indi
#else
- template <std::size_t n>
+ template <std::ptrdiff_t n>
struct non_zero_size {
- typedef internal::type2val<std::size_t, n> type;
+ typedef internal::type2val<std::ptrdiff_t, n> type;
};
template <>
struct non_zero_size<0> {
typedef internal::null_type type;
};
- template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0, std::size_t V5=0> struct Sizes {
+ template <std::ptrdiff_t V1=0, std::ptrdiff_t V2=0, std::ptrdiff_t V3=0, std::ptrdiff_t V4=0, std::ptrdiff_t V5=0> struct Sizes {
typedef typename internal::make_type_list<typename non_zero_size<V1>::type, typename non_zero_size<V2>::type, typename non_zero_size<V3>::type, typename non_zero_size<V4>::type, typename non_zero_size<V5>::type >::type Base;
- static const size_t count = Base::count;
- static const std::size_t total_size = internal::arg_prod<Base>::value;
+ static const std::ptrdiff_t count = Base::count;
+ static const std::ptrdiff_t total_size = internal::arg_prod<Base>::value;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t rank() const {
return count;
}
- static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t TotalSize() {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t TotalSize() {
return internal::arg_prod<Base>::value;
}
@@ -178,7 +178,7 @@ template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0
#if EIGEN_HAS_VARIADIC_TEMPLATES
template <typename... DenseIndex> Sizes(DenseIndex... /*indices*/) { }
- explicit Sizes(std::initializer_list<std::size_t>) {
+ explicit Sizes(std::initializer_list<std::ptrdiff_t>) {
// todo: add assertion
}
#else
@@ -213,18 +213,18 @@ template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
+ ptrdiff_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, *reinterpret_cast<const Base*>(this));
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
+ ptrdiff_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, *reinterpret_cast<const Base*>(this));
}
};
namespace internal {
- template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes<V1, V2, V3, V4, V5>&) {
+ template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes<V1, V2, V3, V4, V5>&) {
return Sizes<V1, V2, V3, V4, V5>::total_size;
}
}
@@ -233,7 +233,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes<V1, V2,
// Boilerplate
namespace internal {
- template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+ template<typename Index, std::ptrdiff_t NumIndices, std::ptrdiff_t n, bool RowMajor>
struct tensor_index_linearization_helper
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -245,7 +245,7 @@ struct tensor_index_linearization_helper
}
};
- template<typename Index, std::size_t NumIndices, bool RowMajor>
+ template<typename Index, std::ptrdiff_t NumIndices, bool RowMajor>
struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -264,7 +264,7 @@ struct DSizes : array<DenseIndex, NumDims> {
typedef array<DenseIndex, NumDims> Base;
static const int count = NumDims;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const {
return NumDims;
}
@@ -298,7 +298,7 @@ struct DSizes : array<DenseIndex, NumDims> {
}
}
#else
- template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
+ template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5>
EIGEN_DEVICE_FUNC DSizes(const Sizes<V1, V2, V3, V4, V5>& a) {
for (int i = 0 ; i < NumDims; ++i) {
(*this)[i] = a[i];
@@ -359,7 +359,7 @@ struct DSizes : array<DenseIndex, NumDims> {
// Boilerplate
namespace internal {
- template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+ template<typename Index, std::ptrdiff_t NumIndices, std::ptrdiff_t n, bool RowMajor>
struct tensor_vsize_index_linearization_helper
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -371,7 +371,7 @@ struct tensor_vsize_index_linearization_helper
}
};
- template<typename Index, std::size_t NumIndices, bool RowMajor>
+ template<typename Index, std::ptrdiff_t NumIndices, bool RowMajor>
struct tensor_vsize_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -386,10 +386,10 @@ struct tensor_vsize_index_linearization_helper<Index, NumIndices, 0, RowMajor>
namespace internal {
template <typename DenseIndex, int NumDims> struct array_size<const DSizes<DenseIndex, NumDims> > {
- static const size_t value = NumDims;
+ static const ptrdiff_t value = NumDims;
};
template <typename DenseIndex, int NumDims> struct array_size<DSizes<DenseIndex, NumDims> > {
- static const size_t value = NumDims;
+ static const ptrdiff_t value = NumDims;
};
#ifndef EIGEN_EMULATE_CXX11_META_H
template <typename std::ptrdiff_t... Indices> struct array_size<const Sizes<Indices...> > {
@@ -399,33 +399,33 @@ template <typename std::ptrdiff_t... Indices> struct array_size<Sizes<Indices...
static const std::ptrdiff_t value = Sizes<Indices...>::count;
};
template <std::ptrdiff_t n, typename std::ptrdiff_t... Indices> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<Indices...>&) {
- return get<n, internal::numeric_list<std::size_t, Indices...> >::value;
+ return get<n, internal::numeric_list<std::ptrdiff_t, Indices...> >::value;
}
template <std::ptrdiff_t n> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<>&) {
eigen_assert(false && "should never be called");
return -1;
}
#else
- template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<const Sizes<V1,V2,V3,V4,V5> > {
- static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
+ template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5> struct array_size<const Sizes<V1,V2,V3,V4,V5> > {
+ static const ptrdiff_t value = Sizes<V1,V2,V3,V4,V5>::count;
};
- template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<Sizes<V1,V2,V3,V4,V5> > {
- static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
+ template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5> struct array_size<Sizes<V1,V2,V3,V4,V5> > {
+ static const ptrdiff_t value = Sizes<V1,V2,V3,V4,V5>::count;
};
- template <std::size_t n, std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_get(const Sizes<V1,V2,V3,V4,V5>&) {
+ template <std::ptrdiff_t n, std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<V1,V2,V3,V4,V5>&) {
return get<n, typename Sizes<V1,V2,V3,V4,V5>::Base>::value;
}
#endif
- template <typename Dims1, typename Dims2, size_t n, size_t m>
+ template <typename Dims1, typename Dims2, ptrdiff_t n, ptrdiff_t m>
struct sizes_match_below_dim {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1&, Dims2&) {
return false;
}
};
- template <typename Dims1, typename Dims2, size_t n>
+ template <typename Dims1, typename Dims2, ptrdiff_t n>
struct sizes_match_below_dim<Dims1, Dims2, n, n> {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1& dims1, Dims2& dims2) {
return (array_get<n-1>(dims1) == array_get<n-1>(dims2)) &
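A toy reduction of the template changes in this file (hypothetical helper, not Eigen code): once the non-type parameter is std::ptrdiff_t instead of std::size_t, loops over it can use a signed induction variable, so comparisons stay signed-vs-signed and -Wsign-compare stays quiet.

#include <cstddef>

template <std::ptrdiff_t NumIndices>
std::ptrdiff_t linearize_sketch(const std::ptrdiff_t* indices,
                                const std::ptrdiff_t* strides) {
  std::ptrdiff_t result = 0;
  for (std::ptrdiff_t i = 0; i < NumIndices; ++i)  // signed vs. signed
    result += indices[i] * strides[i];
  return result;
}

// usage: linearize_sketch<3>(idx, str);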

unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h

@@ -256,8 +256,8 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ tr
typedef TensorBlockMapper<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout> TensorBlockMapper;
Evaluator evaluator(expr, device);
- StorageIndex total_size = array_prod(evaluator.dimensions());
- StorageIndex cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
+ Index total_size = array_prod(evaluator.dimensions());
+ Index cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
if (total_size < cache_size) {
// TODO(andydavis) Reduce block management overhead for small tensors.
internal::TensorExecutor<Expression, ThreadPoolDevice, Vectorizable,
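Widening both locals to Index keeps the total_size < cache_size comparison signed on both sides. A sketch of the pattern, with assumed types (treating the cache size as an unsigned byte count is an assumption for illustration):

#include <cstddef>

using Index = std::ptrdiff_t;

// Convert unsigned byte budgets to the signed Index once, up front, so the
// later comparison does not mix signedness.
bool fits_in_cache_sketch(Index total_size, std::size_t cache_bytes,
                          std::size_t elem_bytes) {
  const Index cache_elems = static_cast<Index>(cache_bytes / elem_bytes);
  return total_size < cache_elems;
}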

unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h

@@ -84,7 +84,7 @@ template<DenseIndex n> struct NumTraits<type2index<n> >
namespace internal {
template <typename T>
EIGEN_DEVICE_FUNC void update_value(T& val, DenseIndex new_val) {
- val = new_val;
+ val = internal::convert_index<T>(new_val);
}
template <DenseIndex n>
EIGEN_DEVICE_FUNC void update_value(type2index<n>& val, DenseIndex new_val) {
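A toy version of the narrowing this guards against (illustrative, not the real helper): when T is narrower than DenseIndex, the plain assignment warns under -Wconversion, while an explicit checked cast documents the narrowing instead of hiding it.

#include <cassert>
#include <cstddef>

using DenseIndex = std::ptrdiff_t;

template <typename T>
void update_value_sketch(T& val, DenseIndex new_val) {
  // Assert the value survives the round trip through T before storing it.
  assert(static_cast<DenseIndex>(static_cast<T>(new_val)) == new_val);
  val = static_cast<T>(new_val);
}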

unsupported/test/cxx11_tensor_block_access.cpp

@@ -33,8 +33,8 @@ static internal::TensorBlockShapeType RandomShape() {
}
template <int NumDims>
- static std::size_t RandomTargetSize(const DSizes<Index, NumDims>& dims) {
- return internal::random<int>(1, dims.TotalSize());
+ static Index RandomTargetSize(const DSizes<Index, NumDims>& dims) {
+ return internal::random<Index>(1, dims.TotalSize());
}
template <int NumDims>
@@ -178,7 +178,7 @@ static void test_block_mapper_maps_every_element() {
// Verify that every coefficient in the original Tensor is accessible through
// TensorBlock only once.
Index total_coeffs = dims.TotalSize();
- VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs);
+ VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs);
VERIFY_IS_EQUAL(*coeff_set.begin(), 0);
VERIFY_IS_EQUAL(*coeff_set.rbegin(), total_coeffs - 1);
}
@@ -208,7 +208,7 @@ static void test_slice_block_mapper_maps_every_element() {
// Pick a random dimension sizes for the tensor blocks.
DSizes<Index, NumDims> block_sizes;
for (int i = 0; i < NumDims; ++i) {
- block_sizes[i] = internal::random<int>(1, tensor_slice_extents[i]);
+ block_sizes[i] = internal::random<Index>(1, tensor_slice_extents[i]);
}
TensorSliceBlockMapper block_mapper(tensor_dims, tensor_slice_offsets,
@@ -222,7 +222,7 @@ static void test_slice_block_mapper_maps_every_element() {
&coeff_set);
}
- VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs);
+ VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs);
}
template <typename T, int NumDims, int Layout>
@@ -262,14 +262,14 @@ static void test_block_io_copy_data_from_source_to_target() {
}
template <int Layout, int NumDims>
- static int GetInputIndex(Index output_index,
+ static Index GetInputIndex(Index output_index,
const array<Index, NumDims>& output_to_input_dim_map,
const array<Index, NumDims>& input_strides,
const array<Index, NumDims>& output_strides) {
int input_index = 0;
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) {
- const int idx = output_index / output_strides[i];
+ const Index idx = output_index / output_strides[i];
input_index += idx * input_strides[output_to_input_dim_map[i]];
output_index -= idx * output_strides[i];
}
@@ -277,7 +277,7 @@ static int GetInputIndex(Index output_index,
output_index * input_strides[output_to_input_dim_map[0]];
} else {
for (int i = 0; i < NumDims - 1; ++i) {
- const int idx = output_index / output_strides[i];
+ const Index idx = output_index / output_strides[i];
input_index += idx * input_strides[output_to_input_dim_map[i]];
output_index -= idx * output_strides[i];
}
@@ -650,7 +650,7 @@ static void test_uniform_block_shape()
{
// Test shape 'UniformAllDims' with uniform 'max_coeff count'.
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 5 * 5 * 5 * 5 * 5;
+ const Index max_coeff_count = 5 * 5 * 5 * 5 * 5;
TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -664,7 +664,7 @@ static void test_uniform_block_shape()
// partially into first inner-most dimension.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 7 * 5 * 5 * 5 * 5;
+ const Index max_coeff_count = 7 * 5 * 5 * 5 * 5;
TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -675,7 +675,7 @@ static void test_uniform_block_shape()
VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 5 * 5 * 5 * 5 * 6;
+ const Index max_coeff_count = 5 * 5 * 5 * 5 * 6;
TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -690,7 +690,7 @@ static void test_uniform_block_shape()
// fully into first inner-most dimension.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 11 * 5 * 5 * 5 * 5;
+ const Index max_coeff_count = 11 * 5 * 5 * 5 * 5;
TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -701,7 +701,7 @@ static void test_uniform_block_shape()
VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 5 * 5 * 5 * 5 * 7;
+ const Index max_coeff_count = 5 * 5 * 5 * 5 * 7;
TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -716,7 +716,7 @@ static void test_uniform_block_shape()
// fully into first few inner-most dimensions.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(7, 5, 6, 17, 7);
- const size_t max_coeff_count = 7 * 5 * 6 * 7 * 5;
+ const Index max_coeff_count = 7 * 5 * 6 * 7 * 5;
TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -728,7 +728,7 @@ static void test_uniform_block_shape()
VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(7, 5, 6, 9, 7);
- const size_t max_coeff_count = 5 * 5 * 5 * 6 * 7;
+ const Index max_coeff_count = 5 * 5 * 5 * 6 * 7;
TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -743,7 +743,7 @@ static void test_uniform_block_shape()
// Test shape 'UniformAllDims' with full allocation to all dims.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(7, 5, 6, 17, 7);
- const size_t max_coeff_count = 7 * 5 * 6 * 17 * 7;
+ const Index max_coeff_count = 7 * 5 * 6 * 17 * 7;
TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -755,7 +755,7 @@ static void test_uniform_block_shape()
VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(7, 5, 6, 9, 7);
- const size_t max_coeff_count = 7 * 5 * 6 * 9 * 7;
+ const Index max_coeff_count = 7 * 5 * 6 * 9 * 7;
TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -777,7 +777,7 @@ static void test_skewed_inner_dim_block_shape()
// Test shape 'SkewedInnerDims' with partial allocation to inner-most dim.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 10 * 1 * 1 * 1 * 1;
+ const Index max_coeff_count = 10 * 1 * 1 * 1 * 1;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -788,7 +788,7 @@ static void test_skewed_inner_dim_block_shape()
VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 1 * 1 * 1 * 1 * 6;
+ const Index max_coeff_count = 1 * 1 * 1 * 1 * 6;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -802,7 +802,7 @@ static void test_skewed_inner_dim_block_shape()
// Test shape 'SkewedInnerDims' with full allocation to inner-most dim.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 11 * 1 * 1 * 1 * 1;
+ const Index max_coeff_count = 11 * 1 * 1 * 1 * 1;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -813,7 +813,7 @@ static void test_skewed_inner_dim_block_shape()
VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 1 * 1 * 1 * 1 * 7;
+ const Index max_coeff_count = 1 * 1 * 1 * 1 * 7;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -828,7 +828,7 @@ static void test_skewed_inner_dim_block_shape()
// and partial allocation to second inner-dim.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 11 * 3 * 1 * 1 * 1;
+ const Index max_coeff_count = 11 * 3 * 1 * 1 * 1;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -840,7 +840,7 @@ static void test_skewed_inner_dim_block_shape()
VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 1 * 1 * 1 * 15 * 7;
+ const Index max_coeff_count = 1 * 1 * 1 * 15 * 7;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -856,7 +856,7 @@ static void test_skewed_inner_dim_block_shape()
// and partial allocation to third inner-dim.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 11 * 5 * 5 * 1 * 1;
+ const Index max_coeff_count = 11 * 5 * 5 * 1 * 1;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -869,7 +869,7 @@ static void test_skewed_inner_dim_block_shape()
VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 1 * 1 * 5 * 17 * 7;
+ const Index max_coeff_count = 1 * 1 * 5 * 17 * 7;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -885,7 +885,7 @@ static void test_skewed_inner_dim_block_shape()
// Test shape 'SkewedInnerDims' with full allocation to all dims.
if (Layout == ColMajor) {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 11 * 5 * 6 * 17 * 7;
+ const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -897,7 +897,7 @@ static void test_skewed_inner_dim_block_shape()
VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
} else {
DSizes<Index, 5> dims(11, 5, 6, 17, 7);
- const size_t max_coeff_count = 11 * 5 * 6 * 17 * 7;
+ const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
max_coeff_count);
TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
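The test-side casts above all follow one pattern: std::set::size() returns the unsigned size_t, while the expected counts are Eigen's signed Index, so the container size is converted before comparing. A minimal sketch:

#include <cstddef>
#include <set>

using Index = std::ptrdiff_t;

bool covers_all_coeffs_sketch(const std::set<Index>& coeff_set,
                              Index total_coeffs) {
  // Cast the unsigned container size to Index once; the comparison is then
  // signed vs. signed and warning-free.
  return static_cast<Index>(coeff_set.size()) == total_coeffs;
}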

unsupported/test/cxx11_tensor_thread_pool.cpp

@@ -300,7 +300,7 @@ static void test_multithread_contraction_with_output_kernel() {
m_result = m_left * m_right;
- for (size_t i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) {
VERIFY(&t_result.data()[i] != &m_result.data()[i]);
VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i]));
}