Mirror of https://gitlab.com/libeigen/eigen.git (synced 2024-12-15 07:10:37 +08:00)
Added support for tensor chips
This commit is contained in:
parent 4b36c3591f
commit 2ed1838aeb
@@ -47,6 +47,7 @@
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h"
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h"
@@ -254,6 +254,11 @@ class TensorBase<Derived, ReadOnlyAccessors>
     slice(const StartIndices& startIndices, const Sizes& sizes) const {
       return TensorSlicingOp<const StartIndices, const Sizes, const Derived>(derived(), startIndices, sizes);
     }
+    template <std::size_t DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    const TensorChippingOp<DimId, const Derived>
+    chip(const Index offset) const {
+      return TensorChippingOp<DimId, const Derived>(derived(), offset);
+    }
     template <typename PaddingDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
     const TensorPaddingOp<const PaddingDimensions, const Derived>
     pad(const PaddingDimensions& padding) const {

@@ -327,7 +332,7 @@ class TensorBase<Derived, WriteAccessors> : public TensorBase<Derived, ReadOnlyAccessors>

     template <typename NewDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
     TensorReshapingOp<const NewDimensions, Derived>
-    reshape(const NewDimensions& newDimensions) {
+    reshape(const NewDimensions& newDimensions) const {
       return TensorReshapingOp<const NewDimensions, Derived>(derived(), newDimensions);
     }
     template <typename StartIndices, typename Sizes> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE

@@ -335,6 +340,11 @@ class TensorBase<Derived, WriteAccessors> : public TensorBase<Derived, ReadOnlyAccessors>
     slice(const StartIndices& startIndices, const Sizes& sizes) const {
       return TensorSlicingOp<const StartIndices, const Sizes, Derived>(derived(), startIndices, sizes);
     }
+    template <std::size_t DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    TensorChippingOp<DimId, Derived>
+    chip(const Index offset) const {
+      return TensorChippingOp<DimId, Derived>(derived(), offset);
+    }
     template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
     TensorShufflingOp<const Shuffle, Derived>
     shuffle(const Shuffle& shuffle) const {
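Editor's note (not part of the commit): a minimal usage sketch of the chip() accessor added above. Dimensions, variable names, and the include path are illustrative; it assumes the unsupported CXX11 Tensor module is on the include path.

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::Tensor<float, 3> t(2, 3, 4);
      t.setRandom();

      // Read-only use: chip<1>(2) fixes dimension 1 at offset 2 and drops it,
      // yielding a rank-2 tensor of shape (2, 4).
      Eigen::Tensor<float, 2> c = t.chip<1>(2);
      std::cout << c.dimension(0) << " x " << c.dimension(1) << std::endl;

      // Lvalue use (the WriteAccessors overload): assign into one slice of t.
      Eigen::Tensor<float, 2> filler(2, 4);
      filler.setConstant(1.0f);
      t.chip<1>(0) = filler;
      return 0;
    }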
unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h (new file, 232 lines)
@@ -0,0 +1,232 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
#define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H

namespace Eigen {

/** \class TensorChippingOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief A chip is a thin slice, corresponding to a column or a row in a 2-d tensor.
  *
  */
namespace internal {
template<std::size_t DimId, typename XprType>
struct traits<TensorChippingOp<DimId, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef typename internal::packet_traits<Scalar>::type Packet;
  typedef typename traits<XprType>::StorageKind StorageKind;
  typedef typename traits<XprType>::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
};

template<std::size_t DimId, typename XprType>
struct eval<TensorChippingOp<DimId, XprType>, Eigen::Dense>
{
  typedef const TensorChippingOp<DimId, XprType>& type;
};

template<std::size_t DimId, typename XprType>
struct nested<TensorChippingOp<DimId, XprType>, 1, typename eval<TensorChippingOp<DimId, XprType> >::type>
{
  typedef TensorChippingOp<DimId, XprType> type;
};

}  // end namespace internal


template<std::size_t DimId, typename XprType>
class TensorChippingOp : public TensorBase<TensorChippingOp<DimId, XprType> >
{
  public:
    typedef typename Eigen::internal::traits<TensorChippingOp>::Scalar Scalar;
    typedef typename Eigen::internal::traits<TensorChippingOp>::Packet Packet;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename XprType::PacketReturnType PacketReturnType;
    typedef typename Eigen::internal::nested<TensorChippingOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorChippingOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorChippingOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset)
        : m_xpr(expr), m_offset(offset) {}

    EIGEN_DEVICE_FUNC
    const Index offset() const { return m_offset; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorChippingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorChippingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice, false>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    typename XprType::Nested m_xpr;
    const Index m_offset;
};
// Eval as rvalue
template<std::size_t DimId, typename ArgType, typename Device>
struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
  typedef TensorChippingOp<DimId, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumDims = NumInputDims-1;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumDims> Dimensions;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets.
    IsAligned = false,
    PacketAccess = false,  // not yet implemented
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_device(device)
  {
    // We could also support the case where NumInputDims==1 if needed.
    EIGEN_STATIC_ASSERT(NumInputDims >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT(NumInputDims > DimId, YOU_MADE_A_PROGRAMMING_MISTAKE);

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    int j = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (i != DimId) {
        m_dimensions[j] = input_dims[i];
        ++j;
      }
    }

    m_stride = 1;
    m_inputStride = 1;
    for (int i = 0; i < DimId; ++i) {
      m_stride *= input_dims[i];
      m_inputStride *= input_dims[i];
    }
    m_inputStride *= input_dims[DimId];
    m_inputOffset = m_stride * op.offset();
  }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketReturnType PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }

  /* to be done
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {

  }*/

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const { return NULL; }

 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex;
    if (DimId == 0) {
      // m_stride is equal to 1, so let's avoid the integer division.
      eigen_assert(m_stride == 1);
      inputIndex = index * m_inputStride + m_inputOffset;
    } else if (DimId == NumInputDims-1) {
      // m_stride is always greater than index, so let's avoid the integer division.
      eigen_assert(m_stride > index);
      inputIndex = index + m_inputOffset;
    } else {
      const Index idx = index / m_stride;
      inputIndex = idx * m_inputStride + m_inputOffset;
      index -= idx * m_stride;
      inputIndex += index;
    }
    return inputIndex;
  }

  Dimensions m_dimensions;
  Index m_stride;
  Index m_inputOffset;
  Index m_inputStride;
  TensorEvaluator<ArgType, Device> m_impl;
  const Device& m_device;
};
// Eval as lvalue
template<std::size_t DimId, typename ArgType, typename Device>
struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
  : public TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> Base;
  typedef TensorChippingOp<DimId, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumDims = NumInputDims-1;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumDims> Dimensions;

  enum {
    IsAligned = false,
    PacketAccess = false,
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketReturnType PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }

  /* to be done
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
  } */
};


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
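Editor's note (not part of the commit): a minimal standalone sketch of the column-major index remapping that srcCoeff() performs, worked through for an input of dimensions (2, 3, 5) chipped at DimId = 1 with offset 2. All names and values are illustrative, chosen only to check the stride arithmetic.

    #include <cassert>

    int main() {
      const int dims[3] = {2, 3, 5};
      const int DimId = 1, offset = 2;

      // Same stride setup as the evaluator's constructor.
      int stride = 1, inputStride = 1;
      for (int i = 0; i < DimId; ++i) { stride *= dims[i]; inputStride *= dims[i]; }
      inputStride *= dims[DimId];          // stride of the dimension following the chipped one
      const int inputOffset = stride * offset;

      // Map output index 7, i.e. coordinates (1, 3) of the 2x5 chip,
      // back to the input index of coordinates (1, 2, 3) in the 2x3x5 tensor.
      int index = 7;
      const int idx = index / stride;
      const int inputIndex = idx * inputStride + inputOffset + (index - idx * stride);
      assert(inputIndex == 1 + 2 * 2 + (2 * 3) * 3);  // = 23, column-major index of (1, 2, 3)
      return 0;
    }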
@@ -21,11 +21,12 @@ template<typename NullaryOp, typename PlainObjectType> class TensorCwiseNullaryOp;
 template<typename UnaryOp, typename XprType> class TensorCwiseUnaryOp;
 template<typename BinaryOp, typename LeftXprType, typename RightXprType> class TensorCwiseBinaryOp;
 template<typename IfXprType, typename ThenXprType, typename ElseXprType> class TensorSelectOp;
 template<typename Broadcast, typename XprType> class TensorBroadcastingOp;
 template<typename Op, typename Dims, typename XprType> class TensorReductionOp;
 template<typename Axis, typename LeftXprType, typename RightXprType> class TensorConcatenationOp;
 template<typename Dimensions, typename LeftXprType, typename RightXprType> class TensorContractionOp;
 template<typename Dimensions, typename InputXprType, typename KernelXprType> class TensorConvolutionOp;
 template<typename Broadcast, typename XprType> class TensorBroadcastingOp;
+template<std::size_t DimId, typename XprType> class TensorChippingOp;
 template<typename NewDimensions, typename XprType> class TensorReshapingOp;
 template<typename StartIndices, typename Sizes, typename XprType> class TensorSlicingOp;
 template<typename PaddingDimensions, typename XprType> class TensorPaddingOp;
@@ -115,6 +115,7 @@ if(EIGEN_TEST_CXX11)
   ei_add_test(cxx11_tensor_lvalue "-std=c++0x")
   ei_add_test(cxx11_tensor_map "-std=c++0x")
   ei_add_test(cxx11_tensor_broadcasting "-std=c++0x")
+  ei_add_test(cxx11_tensor_chipping "-std=c++0x")
   ei_add_test(cxx11_tensor_concatenation "-std=c++0x")
   ei_add_test(cxx11_tensor_morphing "-std=c++0x")
   ei_add_test(cxx11_tensor_padding "-std=c++0x")
unsupported/test/cxx11_tensor_chipping.cpp (new file, 244 lines)
@@ -0,0 +1,244 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include "main.h"

#include <Eigen/CXX11/Tensor>

using Eigen::Tensor;

static void test_simple_chip()
{
  Tensor<float, 5> tensor(2,3,5,7,11);
  tensor.setRandom();

  Tensor<float, 4> chip1;
  chip1 = tensor.chip<0>(1);
  VERIFY_IS_EQUAL(chip1.dimension(0), 3);
  VERIFY_IS_EQUAL(chip1.dimension(1), 5);
  VERIFY_IS_EQUAL(chip1.dimension(2), 7);
  VERIFY_IS_EQUAL(chip1.dimension(3), 11);
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 5; ++j) {
      for (int k = 0; k < 7; ++k) {
        for (int l = 0; l < 11; ++l) {
          VERIFY_IS_EQUAL(chip1(i,j,k,l), tensor(1,i,j,k,l));
        }
      }
    }
  }

  Tensor<float, 4> chip2 = tensor.chip<1>(1);
  VERIFY_IS_EQUAL(chip2.dimension(0), 2);
  VERIFY_IS_EQUAL(chip2.dimension(1), 5);
  VERIFY_IS_EQUAL(chip2.dimension(2), 7);
  VERIFY_IS_EQUAL(chip2.dimension(3), 11);
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 7; ++k) {
        for (int l = 0; l < 11; ++l) {
          VERIFY_IS_EQUAL(chip2(i,j,k,l), tensor(i,1,j,k,l));
        }
      }
    }
  }

  Tensor<float, 4> chip3 = tensor.chip<2>(2);
  VERIFY_IS_EQUAL(chip3.dimension(0), 2);
  VERIFY_IS_EQUAL(chip3.dimension(1), 3);
  VERIFY_IS_EQUAL(chip3.dimension(2), 7);
  VERIFY_IS_EQUAL(chip3.dimension(3), 11);
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 7; ++k) {
        for (int l = 0; l < 11; ++l) {
          VERIFY_IS_EQUAL(chip3(i,j,k,l), tensor(i,j,2,k,l));
        }
      }
    }
  }

  Tensor<float, 4> chip4(tensor.chip<3>(5));
  VERIFY_IS_EQUAL(chip4.dimension(0), 2);
  VERIFY_IS_EQUAL(chip4.dimension(1), 3);
  VERIFY_IS_EQUAL(chip4.dimension(2), 5);
  VERIFY_IS_EQUAL(chip4.dimension(3), 11);
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 5; ++k) {
        for (int l = 0; l < 7; ++l) {
          VERIFY_IS_EQUAL(chip4(i,j,k,l), tensor(i,j,k,5,l));
        }
      }
    }
  }

  Tensor<float, 4> chip5(tensor.chip<4>(7));
  VERIFY_IS_EQUAL(chip5.dimension(0), 2);
  VERIFY_IS_EQUAL(chip5.dimension(1), 3);
  VERIFY_IS_EQUAL(chip5.dimension(2), 5);
  VERIFY_IS_EQUAL(chip5.dimension(3), 7);
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 5; ++k) {
        for (int l = 0; l < 7; ++l) {
          VERIFY_IS_EQUAL(chip5(i,j,k,l), tensor(i,j,k,l,7));
        }
      }
    }
  }
}

static void test_chip_in_expr() {
  Tensor<float, 5> input1(2,3,5,7,11);
  input1.setRandom();
  Tensor<float, 4> input2(3,5,7,11);
  input2.setRandom();

  Tensor<float, 4> result = input1.chip<0>(0) + input2;
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 5; ++j) {
      for (int k = 0; k < 7; ++k) {
        for (int l = 0; l < 11; ++l) {
          float expected = input1(0,i,j,k,l) + input2(i,j,k,l);
          VERIFY_IS_EQUAL(result(i,j,k,l), expected);
        }
      }
    }
  }

  Tensor<float, 3> input3(3,7,11);
  input3.setRandom();
  Tensor<float, 3> result2 = input1.chip<0>(0).chip<1>(2) + input3;
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 7; ++j) {
      for (int k = 0; k < 11; ++k) {
        float expected = input1(0,i,2,j,k) + input3(i,j,k);
        VERIFY_IS_EQUAL(result2(i,j,k), expected);
      }
    }
  }
}

static void test_chip_as_lvalue()
{
  Tensor<float, 5> input1(2,3,5,7,11);
  input1.setRandom();

  Tensor<float, 4> input2(3,5,7,11);
  input2.setRandom();
  Tensor<float, 5> tensor = input1;
  tensor.chip<0>(1) = input2;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 5; ++k) {
        for (int l = 0; l < 7; ++l) {
          for (int m = 0; m < 11; ++m) {
            if (i != 1) {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
            } else {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input2(j,k,l,m));
            }
          }
        }
      }
    }
  }

  Tensor<float, 4> input3(2,5,7,11);
  input3.setRandom();
  tensor = input1;
  tensor.chip<1>(1) = input3;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 5; ++k) {
        for (int l = 0; l < 7; ++l) {
          for (int m = 0; m < 11; ++m) {
            if (j != 1) {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
            } else {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input3(i,k,l,m));
            }
          }
        }
      }
    }
  }

  Tensor<float, 4> input4(2,3,7,11);
  input4.setRandom();
  tensor = input1;
  tensor.chip<2>(3) = input4;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 5; ++k) {
        for (int l = 0; l < 7; ++l) {
          for (int m = 0; m < 11; ++m) {
            if (k != 3) {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
            } else {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input4(i,j,l,m));
            }
          }
        }
      }
    }
  }

  Tensor<float, 4> input5(2,3,5,11);
  input5.setRandom();
  tensor = input1;
  tensor.chip<3>(4) = input5;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 5; ++k) {
        for (int l = 0; l < 7; ++l) {
          for (int m = 0; m < 11; ++m) {
            if (l != 4) {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
            } else {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input5(i,j,k,m));
            }
          }
        }
      }
    }
  }

  Tensor<float, 4> input6(2,3,5,7);
  input6.setRandom();
  tensor = input1;
  tensor.chip<4>(5) = input6;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 5; ++k) {
        for (int l = 0; l < 7; ++l) {
          for (int m = 0; m < 11; ++m) {
            if (m != 5) {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
            } else {
              VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input6(i,j,k,l));
            }
          }
        }
      }
    }
  }
}

void test_cxx11_tensor_chipping()
{
  CALL_SUBTEST(test_simple_chip());
  CALL_SUBTEST(test_chip_in_expr());
  CALL_SUBTEST(test_chip_as_lvalue());
}