Mirror of https://gitlab.com/libeigen/eigen.git, synced 2025-01-30 17:40:05 +08:00

Commit c884a8e7f4: merge

@@ -994,7 +994,7 @@ struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
     CoeffReadCost = TraversalSize==Dynamic ? HugeCost
                   : TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value),
 
-    Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&HereditaryBits),
+    Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit))),
 
     Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized
   };

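The intent of the changed Flags line appears to be that the storage-order bit of a partial-redux expression should come only from its own traits, not be inherited from its argument's evaluator. A minimal sketch of the masking, using made-up flag values rather than Eigen's real constants:

#include <cassert>

int main()
{
  // Hypothetical flag bits, for illustration only (not Eigen's real values).
  const unsigned RowMajorBit    = 0x1;
  const unsigned EvalBeforeBit  = 0x2;
  const unsigned HereditaryBits = RowMajorBit | EvalBeforeBit;

  const unsigned xpr_traits_flags = 0;                           // expression itself is column-major
  const unsigned arg_eval_flags   = RowMajorBit | EvalBeforeBit; // argument happens to be row-major

  // Old form: the argument's RowMajorBit leaks through HereditaryBits.
  unsigned old_flags = (xpr_traits_flags & RowMajorBit) | (arg_eval_flags & HereditaryBits);
  // New form: mask RowMajorBit out of the inherited bits, keep only the expression's own.
  unsigned new_flags = (xpr_traits_flags & RowMajorBit)
                     | (arg_eval_flags & (HereditaryBits & ~RowMajorBit));

  assert(old_flags & RowMajorBit);     // wrong storage order reported
  assert(!(new_flags & RowMajorBit));  // expression's own (column-major) order preserved
  return 0;
}
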
@@ -676,8 +676,13 @@ struct scalar_sign_op<Scalar,true> {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op)
   EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const
   {
-    typename NumTraits<Scalar>::Real aa = std::abs(a);
-    return (aa==0) ? Scalar(0) : (a/aa);
+    using std::abs;
+    typedef typename NumTraits<Scalar>::Real real_type;
+    real_type aa = abs(a);
+    if (aa==0)
+      return Scalar(0);
+    aa = 1./aa;
+    return Scalar(real(a)*aa, imag(a)*aa );
   }
   //TODO
   //template <typename Packet>

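For reference, the new complex branch computes sign(a) = a/|a| by scaling the real and imaginary parts with the reciprocal magnitude instead of performing a full complex division, and returns 0 when a is 0. A standalone sketch of the same computation with std::complex (outside Eigen, illustration only):

#include <complex>
#include <cmath>
#include <iostream>

// Sign of a complex number: 0 for a == 0, otherwise a scaled to unit magnitude.
std::complex<double> complex_sign(const std::complex<double>& a)
{
  double aa = std::abs(a);       // magnitude |a|
  if (aa == 0)
    return std::complex<double>(0);
  aa = 1.0 / aa;                 // one real division instead of a complex one
  return std::complex<double>(a.real() * aa, a.imag() * aa);
}

int main()
{
  std::cout << complex_sign(std::complex<double>(3.0, 4.0)) << "\n";  // expected (0.6,0.8)
}
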
@@ -5,7 +5,7 @@ endif (ADOLC_INCLUDES AND ADOLC_LIBRARIES)
 
 find_path(ADOLC_INCLUDES
   NAMES
-  adolc/adouble.h
+  adolc/adtl.h
   PATHS
   $ENV{ADOLCDIR}
   ${INCLUDE_INSTALL_DIR}

@@ -11,9 +11,10 @@
 
 void test_is_same_dense()
 {
-  MatrixXd m1(10,10);
-  Ref<MatrixXd> ref_m1(m1);
-  Ref<const MatrixXd> const_ref_m1(m1);
+  typedef Matrix<double,Dynamic,Dynamic,ColMajor> ColMatrixXd;
+  ColMatrixXd m1(10,10);
+  Ref<ColMatrixXd> ref_m1(m1);
+  Ref<const ColMatrixXd> const_ref_m1(m1);
   VERIFY(is_same_dense(m1,m1));
   VERIFY(is_same_dense(m1,ref_m1));
   VERIFY(is_same_dense(const_ref_m1,m1));

@@ -22,9 +23,9 @@ void test_is_same_dense()
   VERIFY(is_same_dense(m1.block(0,0,m1.rows(),m1.cols()),m1));
   VERIFY(!is_same_dense(m1.row(0),m1.col(0)));
 
-  Ref<const MatrixXd> const_ref_m1_row(m1.row(1));
+  Ref<const ColMatrixXd> const_ref_m1_row(m1.row(1));
   VERIFY(!is_same_dense(m1.row(1),const_ref_m1_row));
 
-  Ref<const MatrixXd> const_ref_m1_col(m1.col(1));
+  Ref<const ColMatrixXd> const_ref_m1_col(m1.col(1));
   VERIFY(is_same_dense(m1.col(1),const_ref_m1_col));
 }

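The switch to an explicitly column-major ColMatrixXd matters here because a Ref built from a row of a column-major matrix cannot alias it in place (the row's inner stride is not 1), so the Ref holds a copy and is_same_dense correctly returns false, while a column can be referenced directly. A small sketch of that aliasing behaviour, checking data pointers instead of the internal is_same_dense helper:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(10, 10);

  // A row of a column-major matrix has inner stride 10, so this Ref copies.
  Eigen::Ref<const Eigen::MatrixXd> row_ref(m.row(1));
  // A column is contiguous, so this Ref aliases m directly.
  Eigen::Ref<const Eigen::MatrixXd> col_ref(m.col(1));

  std::cout << (row_ref.data() == &m(1, 0)) << "\n";  // 0: different storage
  std::cout << (col_ref.data() == &m(0, 1)) << "\n";  // 1: same storage
}
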
@@ -51,6 +51,7 @@ template <typename MatrixType> void run_nesting_ops_2(const MatrixType& _m)
   Index rows = _m.rows();
   Index cols = _m.cols();
   MatrixType m1 = MatrixType::Random(rows,cols);
+  Matrix<Scalar,MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime,ColMajor> m2;
 
   if((MatrixType::SizeAtCompileTime==Dynamic))
   {

@@ -79,9 +80,9 @@ template <typename MatrixType> void run_nesting_ops_2(const MatrixType& _m)
   }
   VERIFY( verify_eval_type<2>(m1+m1, m1+m1) );
   VERIFY( verify_eval_type<3>(m1+m1, m1) );
-  VERIFY( verify_eval_type<1>(m1*m1.transpose(), m1) );
-  VERIFY( verify_eval_type<1>(m1*(m1+m1).transpose(), m1) );
-  VERIFY( verify_eval_type<2>(m1*m1.transpose(), m1) );
+  VERIFY( verify_eval_type<1>(m1*m1.transpose(), m2) );
+  VERIFY( verify_eval_type<1>(m1*(m1+m1).transpose(), m2) );
+  VERIFY( verify_eval_type<2>(m1*m1.transpose(), m2) );
   VERIFY( verify_eval_type<1>(m1+m1*m1, m1) );
 
   VERIFY( verify_eval_type<1>(m1.template triangularView<Lower>().solve(m1), m1) );

@@ -18,7 +18,9 @@ template<typename T> T negate(const T& x) { return -x; }
 }
 }
 
-template<typename Scalar> bool isApproxAbs(const Scalar& a, const Scalar& b, const typename NumTraits<Scalar>::Real& refvalue)
+// NOTE: we disable inlining for this function to work around a GCC issue when using -O3 and the i387 FPU.
+template<typename Scalar> EIGEN_DONT_INLINE
+bool isApproxAbs(const Scalar& a, const Scalar& b, const typename NumTraits<Scalar>::Real& refvalue)
 {
   return internal::isMuchSmallerThan(a-b, refvalue);
 }

@@ -1,12 +1,15 @@
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra.
 //
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
 //
 // This Source Code Form is subject to the terms of the Mozilla
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
+#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
+#undef EIGEN_DEFAULT_TO_ROW_MAJOR
+#endif
 #define EIGEN_DEBUG_ASSIGN
 #include "main.h"
 #include <typeinfo>

@@ -78,7 +78,7 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
     IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign),
     PacketAccess = (internal::packet_traits<Scalar>::size > 1),
     Layout = Options_ & RowMajor ? RowMajor : ColMajor,
-    CoordAccess = true,
+    CoordAccess = true
   };
 
   static const int Options = Options_;

@@ -368,7 +368,7 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
       EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
     }
     inline explicit Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
-      : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 4>(dim1, dim2, dim3, dim4, dim5))
+      : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
     {
       EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
     }

@@ -49,7 +49,7 @@ template<typename PlainObjectType, int Options_> class TensorMap : public Tensor
     IsAligned = ((int(Options_)&Aligned)==Aligned),
     PacketAccess = (internal::packet_traits<Scalar>::size > 1),
     Layout = PlainObjectType::Layout,
-    CoordAccess = true,
+    CoordAccess = true
   };
 
   EIGEN_DEVICE_FUNC

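The two CoordAccess edits above drop a trailing comma after the last enumerator; this reads as a C++03-pedantic cleanup, since a trailing comma in an enumerator list only became standard in C++11. A trivial illustration with a hypothetical enum:

enum TensorFlags {
  IsAlignedFlag = 1,
  CoordAccessFlag = 2   // no trailing comma: only C++11 and later allow one here
};

int main() { return IsAlignedFlag | CoordAccessFlag; }
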
@@ -158,7 +158,7 @@ template<typename PlainObjectType, int Options_> class TensorMap : public Tensor
     EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
     {
       if (PlainObjectType::Options&RowMajor) {
-        const Index index = i1 + i0 * m_dimensions[0];
+        const Index index = i1 + i0 * m_dimensions[1];
         return m_data[index];
       } else {
         const Index index = i0 + i1 * m_dimensions[0];

@@ -169,7 +169,7 @@ template<typename PlainObjectType, int Options_> class TensorMap : public Tensor
     EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
     {
       if (PlainObjectType::Options&RowMajor) {
-         const Index index = i2 + m_dimensions[1] * (i1 + m_dimensions[0] * i0);
+         const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
         return m_data[index];
       } else {
         const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2);

@@ -245,7 +245,7 @@ template<typename PlainObjectType, int Options_> class TensorMap : public Tensor
     EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
     {
       if (PlainObjectType::Options&RowMajor) {
-        const Index index = i1 + i0 * m_dimensions[0];
+        const Index index = i1 + i0 * m_dimensions[1];
        return m_data[index];
      } else {
        const Index index = i0 + i1 * m_dimensions[0];

@@ -256,7 +256,7 @@ template<typename PlainObjectType, int Options_> class TensorMap : public Tensor
     EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
     {
       if (PlainObjectType::Options&RowMajor) {
-         const Index index = i2 + m_dimensions[1] * (i1 + m_dimensions[0] * i0);
+         const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
         return m_data[index];
       } else {
         const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2);

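The four TensorMap hunks above fix the same indexing bug: in the row-major branch, the stride of the leading index must be the trailing extent (m_dimensions[1] in the 2-D case, m_dimensions[2] and m_dimensions[1] in the 3-D case), not the leading one. A standalone sketch of the two 2-D linearization formulas with made-up extents:

#include <cassert>

int main()
{
  // Hypothetical 2-D extents: d0 rows, d1 columns.
  const int d0 = 2, d1 = 3;
  int row_major[d0 * d1];
  int col_major[d0 * d1];

  // Fill both layouts with the logical value 10*i0 + i1.
  for (int i0 = 0; i0 < d0; ++i0)
    for (int i1 = 0; i1 < d1; ++i1) {
      row_major[i1 + i0 * d1] = 10 * i0 + i1;  // stride of i0 is d1, the trailing extent
      col_major[i0 + i1 * d0] = 10 * i0 + i1;  // stride of i1 is d0, the leading extent
    }

  // Both layouts agree element-wise on the logical value.
  for (int i0 = 0; i0 < d0; ++i0)
    for (int i1 = 0; i1 < d1; ++i1)
      assert(row_major[i1 + i0 * d1] == col_major[i0 + i1 * d0]);
  return 0;
}
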
@@ -29,8 +29,8 @@ static void test_1d()
   int row_major[6];
   memset(col_major, 0, 6*sizeof(int));
   memset(row_major, 0, 6*sizeof(int));
-  TensorMap<Tensor<int, 1>> vec3(col_major, 6);
-  TensorMap<Tensor<int, 1, RowMajor>> vec4(row_major, 6);
+  TensorMap<Tensor<int, 1> > vec3(col_major, 6);
+  TensorMap<Tensor<int, 1, RowMajor> > vec4(row_major, 6);
 
   vec3 = vec1;
   vec4 = vec2;

@@ -92,8 +92,8 @@ static void test_2d()
   int row_major[6];
   memset(col_major, 0, 6*sizeof(int));
   memset(row_major, 0, 6*sizeof(int));
-  TensorMap<Tensor<int, 2>> mat3(row_major, 2, 3);
-  TensorMap<Tensor<int, 2, RowMajor>> mat4(col_major, 2, 3);
+  TensorMap<Tensor<int, 2> > mat3(row_major, 2, 3);
+  TensorMap<Tensor<int, 2, RowMajor> > mat4(col_major, 2, 3);
 
   mat3 = mat1;
   mat4 = mat2;

@@ -152,8 +152,8 @@ static void test_3d()
   int row_major[2*3*7];
   memset(col_major, 0, 2*3*7*sizeof(int));
   memset(row_major, 0, 2*3*7*sizeof(int));
-  TensorMap<Tensor<int, 3>> mat3(col_major, 2, 3, 7);
-  TensorMap<Tensor<int, 3, RowMajor>> mat4(row_major, 2, 3, 7);
+  TensorMap<Tensor<int, 3> > mat3(col_major, 2, 3, 7);
+  TensorMap<Tensor<int, 3, RowMajor> > mat4(row_major, 2, 3, 7);
 
   mat3 = mat1;
   mat4 = mat2;

@@ -24,12 +24,12 @@ static void test_simple_cast()
   cplextensor.setRandom();
 
   chartensor = ftensor.cast<char>();
-  cplextensor = ftensor.cast<std::complex<float>>();
+  cplextensor = ftensor.cast<std::complex<float> >();
 
   for (int i = 0; i < 20; ++i) {
     for (int j = 0; j < 30; ++j) {
       VERIFY_IS_EQUAL(chartensor(i,j), static_cast<char>(ftensor(i,j)));
-      VERIFY_IS_EQUAL(cplextensor(i,j), static_cast<std::complex<float>>(ftensor(i,j)));
+      VERIFY_IS_EQUAL(cplextensor(i,j), static_cast<std::complex<float> >(ftensor(i,j)));
     }
   }
 }

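The `>>` to `> >` edits in the tensor tests above look like C++03 compatibility fixes: before C++11, two adjacent closing angle brackets at the end of a nested template-id are lexed as the right-shift operator. A one-line illustration:

#include <vector>

// C++03 requires the space; without it ">>" is parsed as operator>>.
std::vector<std::vector<int> > nested;        // valid in C++03 and later
// std::vector<std::vector<int>> nested11;    // only valid from C++11 on

int main() { return static_cast<int>(nested.size()); }
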
@@ -25,7 +25,9 @@ struct InsertZeros {
   template <typename Output, typename Device>
   void eval(const Tensor<float, 2>& input, Output& output, const Device& device) const
   {
-    array<DenseIndex, 2> strides{{2, 2}};
+    array<DenseIndex, 2> strides;
+    strides[0] = 2;
+    strides[1] = 2;
     output.stride(strides).device(device) = input;
 
     Eigen::DSizes<DenseIndex, 2> offsets(1,1);

@@ -70,7 +72,8 @@ struct BatchMatMul {
             Output& output, const Device& device) const
   {
     typedef Tensor<float, 3>::DimensionPair DimPair;
-    array<DimPair, 1> dims({{DimPair(1, 0)}});
+    array<DimPair, 1> dims;
+    dims[0] = DimPair(1, 0);
     for (int i = 0; i < output.dimension(2); ++i) {
       output.template chip<2>(i).device(device) = input1.chip<2>(i).contract(input2.chip<2>(i), dims);
     }

@@ -88,9 +91,10 @@ static void test_custom_binary_op()
   Tensor<float, 3> result = tensor1.customOp(tensor2, BatchMatMul());
   for (int i = 0; i < 5; ++i) {
     typedef Tensor<float, 3>::DimensionPair DimPair;
-    array<DimPair, 1> dims({{DimPair(1, 0)}});
+    array<DimPair, 1> dims;
+    dims[0] = DimPair(1, 0);
     Tensor<float, 2> reference = tensor1.chip<2>(i).contract(tensor2.chip<2>(i), dims);
-    TensorRef<Tensor<float, 2>> val = result.chip<2>(i);
+    TensorRef<Tensor<float, 2> > val = result.chip<2>(i);
     for (int j = 0; j < 2; ++j) {
       for (int k = 0; k < 7; ++k) {
         VERIFY_IS_APPROX(val(j, k), reference(j, k));

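Likewise, the array initializations rewritten here and in the following hunk avoid direct brace-initialization, which is only available from C++11 on; the C++03-friendly form default-constructs the array and then assigns each element. A minimal sketch with a hypothetical fixed-size array type (std::array itself is C++11; Eigen's own array<> is what the tests actually use):

// Hypothetical stand-in for a fixed-size array such as Eigen's array<T, N>.
template <typename T, int N>
struct small_array {
  T values[N];
  T& operator[](int i) { return values[i]; }
};

int main()
{
  // C++11 and later could write: small_array<int, 2> strides{{2, 2}};
  // C++03-compatible form, as in the hunks above:
  small_array<int, 2> strides;
  strides[0] = 2;
  strides[1] = 2;
  return strides[0] + strides[1];
}
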
@@ -114,10 +114,18 @@ static void test_expr_reverse(bool LValue)
 
   Tensor<float, 4, DataLayout> result(2,3,5,7);
 
-  array<ptrdiff_t, 4> src_slice_dim{{2,3,1,7}};
-  array<ptrdiff_t, 4> src_slice_start{{0,0,0,0}};
-  array<ptrdiff_t, 4> dst_slice_dim{{2,3,1,7}};
-  array<ptrdiff_t, 4> dst_slice_start{{0,0,0,0}};
+  array<ptrdiff_t, 4> src_slice_dim;
+  src_slice_dim[0] = 2;
+  src_slice_dim[1] = 3;
+  src_slice_dim[2] = 1;
+  src_slice_dim[3] = 7;
+  array<ptrdiff_t, 4> src_slice_start;
+  src_slice_start[0] = 0;
+  src_slice_start[1] = 0;
+  src_slice_start[2] = 0;
+  src_slice_start[3] = 0;
+  array<ptrdiff_t, 4> dst_slice_dim = src_slice_dim;
+  array<ptrdiff_t, 4> dst_slice_start = src_slice_start;
 
   for (int i = 0; i < 5; ++i) {
     if (LValue) {

@@ -18,7 +18,7 @@ static void test_comparison_sugar() {
 
 #define TEST_TENSOR_EQUAL(e1, e2) \
   b = ((e1) == (e2)).all(); \
-  VERIFY(b(0))
+  VERIFY(b())
 
 #define TEST_OP(op) TEST_TENSOR_EQUAL(t op 0, t op t.constant(0))
 