Mirror of https://gitlab.com/libeigen/eigen.git

Merged eigen/eigen into default

Commit 832380c455
@@ -213,15 +213,18 @@ template<> struct gemv_dense_selector<OnTheRight,ColMajor,true>
     ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
                                   * RhsBlasTraits::extractScalarFactor(rhs);
 
+    // make sure Dest is a compile-time vector type (bug 1166)
+    typedef typename conditional<Dest::IsVectorAtCompileTime, Dest, typename Dest::ColXpr>::type ActualDest;
+
     enum {
       // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
       // on, the other hand it is good for the cache to pack the vector anyways...
-      EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
+      EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1),
       ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
-      MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
+      MightCannotUseDest = (ActualDest::InnerStrideAtCompileTime!=1) || ComplexByReal
     };
 
-    gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
+    gemv_static_vector_if<ResScalar,ActualDest::SizeAtCompileTime,ActualDest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
 
     const bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
     const bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
@@ -314,7 +317,7 @@ template<> struct gemv_dense_selector<OnTheRight,RowMajor,true>
         actualLhs.rows(), actualLhs.cols(),
         LhsMapper(actualLhs.data(), actualLhs.outerStride()),
         RhsMapper(actualRhsPtr, 1),
-        dest.data(), dest.innerStride(),
+        dest.data(), dest.col(0).innerStride(), //NOTE if dest is not a vector at compile-time, then dest.innerStride() might be wrong. (bug 1166)
         actualAlpha);
   }
 };
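Aside (not part of the patch): the ActualDest typedef introduced above can be illustrated outside Eigen's internals. The sketch below uses std::conditional in place of Eigen's internal conditional helper; the struct name is invented for illustration.

#include <type_traits>
#include <Eigen/Dense>

// If Dest is already a compile-time vector, use it as-is; otherwise fall back to
// the type of one of its columns, so that vector-oriented traits such as
// SizeAtCompileTime and InnerStrideAtCompileTime are queried on a true vector type.
template<typename Dest>
struct actual_dest_sketch {
  typedef typename std::conditional<Dest::IsVectorAtCompileTime,
                                    Dest,
                                    typename Dest::ColXpr>::type type;
};

static_assert(actual_dest_sketch<Eigen::Vector4d>::type::IsVectorAtCompileTime,
              "a vector destination is kept unchanged");
static_assert(actual_dest_sketch<Eigen::Matrix4d>::type::IsVectorAtCompileTime,
              "a matrix destination maps to its column expression");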
@@ -496,7 +496,7 @@ template<typename Scalar, bool IsInteger>
 struct pow_default_impl
 {
   typedef Scalar retval;
-  static inline Scalar run(const Scalar& x, const Scalar& y)
+  static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y)
   {
     EIGEN_USING_STD_MATH(pow);
     return pow(x, y);
@@ -506,7 +506,7 @@ struct pow_default_impl
 template<typename Scalar>
 struct pow_default_impl<Scalar, true>
 {
-  static inline Scalar run(Scalar x, Scalar y)
+  static EIGEN_DEVICE_FUNC inline Scalar run(Scalar x, Scalar y)
   {
     Scalar res(1);
     eigen_assert(!NumTraits<Scalar>::IsSigned || y >= 0);
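Aside (not part of the patch): EIGEN_DEVICE_FUNC is, roughly, Eigen's portable spelling of __host__ __device__ — it expands to that annotation when the file is compiled by a CUDA compiler and to nothing otherwise, so the run() overloads above become callable from device code as well. A minimal sketch of the same annotation on user code; the functor name is invented for illustration.

#include <Eigen/Core>

// Callable from host code and, when compiled with nvcc, from CUDA kernels too.
struct square_functor {
  EIGEN_DEVICE_FUNC double operator()(double x) const { return x * x; }
};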
@@ -13,7 +13,7 @@
 namespace Eigen {
 
 template<typename Derived>
-inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other)
 {
   typedef typename Derived::PlainObject PlainObject;
   internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar>());
@@ -21,7 +21,7 @@ inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
 }
 
 template<typename Derived>
-inline Derived& ArrayBase<Derived>::operator+=(const Scalar& other)
+EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other)
 {
   typedef typename Derived::PlainObject PlainObject;
   internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar>());
@@ -29,7 +29,7 @@ inline Derived& ArrayBase<Derived>::operator+=(const Scalar& other)
 }
 
 template<typename Derived>
-inline Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
+EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
 {
   typedef typename Derived::PlainObject PlainObject;
   internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar>());
@@ -37,7 +37,7 @@ inline Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
 }
 
 template<typename Derived>
-inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other)
 {
   typedef typename Derived::PlainObject PlainObject;
   internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar>());
@@ -139,6 +139,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
  if ( svd.matrixU().determinant() * svd.matrixV().determinant() < 0 )
   S(m-1) = -1;

 // Eq. (40) and (43)
 Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();

 if (with_scaling)
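Aside (not part of the patch): the public entry point whose implementation the hunk above touches is Eigen::umeyama(), declared in Eigen/Geometry. A small usage sketch; the test data here is invented for illustration.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  // Source point cloud and a known rigid transform applied to it.
  Eigen::Matrix<double, 3, Eigen::Dynamic> src =
      Eigen::Matrix<double, 3, Eigen::Dynamic>::Random(3, 10);
  Eigen::Affine3d T = Eigen::Translation3d(1, 2, 3)
                      * Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ());
  Eigen::Matrix<double, 3, Eigen::Dynamic> dst = T * src;

  // Estimate the similarity transform mapping src onto dst;
  // with_scaling = true also estimates a uniform scale factor (Umeyama 1991).
  Eigen::Matrix4d estimate = Eigen::umeyama(src, dst, true);
  std::cout << estimate << std::endl;
  return 0;
}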
@@ -387,7 +387,10 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
   typedef typename MatrixType::Scalar Scalar;
   typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
   typedef Matrix<StorageIndex,Dynamic,1> VectorI;
+  typedef evaluator<MatrixType> MatEval;
+  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
 
+  MatEval matEval(mat);
   Dest& dest(_dest.derived());
   enum {
     StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
@@ -401,7 +404,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
   for(Index j = 0; j<size; ++j)
   {
     Index jp = perm ? perm[j] : j;
-    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    for(MatIterator it(matEval,j); it; ++it)
     {
       Index i = it.index();
       Index r = it.row();
@@ -431,7 +434,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
   // copy data
   for(StorageIndex j = 0; j<size; ++j)
   {
-    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    for(MatIterator it(matEval,j); it; ++it)
     {
       StorageIndex i = internal::convert_index<StorageIndex>(it.index());
       Index r = it.row();
@@ -474,12 +477,17 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
   typedef typename MatrixType::Scalar Scalar;
   SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
   typedef Matrix<StorageIndex,Dynamic,1> VectorI;
+  typedef evaluator<MatrixType> MatEval;
+  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
 
   enum {
     SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
     StorageOrderMatch = int(SrcOrder) == int(DstOrder),
     DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
     SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
   };
+
+  MatEval matEval(mat);
+
   Index size = mat.rows();
   VectorI count(size);
@@ -488,7 +496,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
   for(StorageIndex j = 0; j<size; ++j)
   {
     StorageIndex jp = perm ? perm[j] : j;
-    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    for(MatIterator it(matEval,j); it; ++it)
     {
       StorageIndex i = it.index();
       if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
@@ -508,7 +516,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
   for(StorageIndex j = 0; j<size; ++j)
   {
 
-    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    for(MatIterator it(matEval,j); it; ++it)
     {
       StorageIndex i = it.index();
       if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
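Aside (not part of the patch): the hunks above switch from constructing MatrixType::InnerIterator directly on the matrix to constructing iterators on an internal::evaluator, which also works for expressions that are not plain SparseMatrix objects. A minimal sketch of that pattern follows; it relies on Eigen's internal, unsupported evaluator API, and the example matrix is invented for illustration.

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> mat(3, 3);
  mat.insert(0, 0) = 1.0;
  mat.insert(2, 1) = 2.0;
  mat.makeCompressed();

  // Build the evaluator once, then construct inner iterators on it per outer index.
  Eigen::internal::evaluator<Eigen::SparseMatrix<double> > matEval(mat);
  typedef Eigen::internal::evaluator<Eigen::SparseMatrix<double> >::InnerIterator MatIterator;
  for (Eigen::Index j = 0; j < mat.outerSize(); ++j)
    for (MatIterator it(matEval, j); it; ++it)
      std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
  return 0;
}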
@@ -43,9 +43,6 @@ template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<Matrix
 
     EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)
 
-    class InnerIterator;
-    class ReverseInnerIterator;
-
     typedef typename MatrixType::Nested MatrixTypeNested;
     typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
     typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
@@ -63,108 +60,6 @@ template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<Matrix
 
 };
 
-template<typename MatrixType, unsigned int Mode>
-class TriangularViewImpl<MatrixType,Mode,Sparse>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
-{
-    typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
-  public:
-
-    EIGEN_STRONG_INLINE InnerIterator(const TriangularViewImpl& view, Index outer)
-      : Base(view.derived().nestedExpression(), outer), m_returnOne(false)
-    {
-      if(SkipFirst)
-      {
-        while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
-          Base::operator++();
-        if(HasUnitDiag)
-          m_returnOne = true;
-      }
-      else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
-      {
-        if((!SkipFirst) && Base::operator bool())
-          Base::operator++();
-        m_returnOne = true;
-      }
-    }
-
-    EIGEN_STRONG_INLINE InnerIterator& operator++()
-    {
-      if(HasUnitDiag && m_returnOne)
-        m_returnOne = false;
-      else
-      {
-        Base::operator++();
-        if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
-        {
-          if((!SkipFirst) && Base::operator bool())
-            Base::operator++();
-          m_returnOne = true;
-        }
-      }
-      return *this;
-    }
-
-    inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); }
-    inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); }
-    inline StorageIndex index() const
-    {
-      if(HasUnitDiag && m_returnOne) return Base::outer();
-      else return Base::index();
-    }
-    inline Scalar value() const
-    {
-      if(HasUnitDiag && m_returnOne) return Scalar(1);
-      else return Base::value();
-    }
-
-    EIGEN_STRONG_INLINE operator bool() const
-    {
-      if(HasUnitDiag && m_returnOne)
-        return true;
-      if(SkipFirst) return Base::operator bool();
-      else
-      {
-        if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
-        else return (Base::operator bool() && this->index() <= this->outer());
-      }
-    }
-  protected:
-    bool m_returnOne;
-};
-
-template<typename MatrixType, unsigned int Mode>
-class TriangularViewImpl<MatrixType,Mode,Sparse>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
-{
-    typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
-  public:
-
-    EIGEN_STRONG_INLINE ReverseInnerIterator(const TriangularViewType& view, Index outer)
-      : Base(view.derived().nestedExpression(), outer)
-    {
-      eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal");
-      if(SkipLast) {
-        while((*this) && (SkipDiag ? this->index()>=outer : this->index()>outer))
-          --(*this);
-      }
-    }
-
-    EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
-    { Base::operator--(); return *this; }
-
-    inline Index row() const { return Base::row(); }
-    inline Index col() const { return Base::col(); }
-
-    EIGEN_STRONG_INLINE operator bool() const
-    {
-      if (SkipLast) return Base::operator bool() ;
-      else
-      {
-        if(SkipDiag) return (Base::operator bool() && this->index() > this->outer());
-        else return (Base::operator bool() && this->index() >= this->outer());
-      }
-    }
-};
-
 namespace internal {
 
 template<typename ArgType, unsigned int Mode>
@@ -193,7 +88,7 @@ public:
     Flags = XprType::Flags
   };
 
-  explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()) {}
+  explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {}
 
   inline Index nonZerosEstimate() const {
     return m_argImpl.nonZerosEstimate();
@@ -205,20 +100,20 @@ public:
   public:
 
     EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer)
-      : Base(xprEval.m_argImpl,outer), m_returnOne(false)
+      : Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()<xprEval.m_arg.innerSize())
     {
       if(SkipFirst)
       {
         while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
           Base::operator++();
         if(HasUnitDiag)
-          m_returnOne = true;
+          m_returnOne = m_containsDiag;
       }
       else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
       {
         if((!SkipFirst) && Base::operator bool())
           Base::operator++();
-        m_returnOne = true; // FIXME check innerSize()>outer();
+        m_returnOne = m_containsDiag;
       }
     }
 
@@ -233,7 +128,7 @@ public:
         {
           if((!SkipFirst) && Base::operator bool())
             Base::operator++();
-          m_returnOne = true; // FIXME check innerSize()>outer();
+          m_returnOne = m_containsDiag;
         }
       }
       return *this;
@@ -266,12 +161,14 @@ public:
 
   protected:
     bool m_returnOne;
+    bool m_containsDiag;
   private:
     Scalar& valueRef();
 };
 
 protected:
   evaluator<ArgType> m_argImpl;
+  const ArgType& m_arg;
 };
 
 } // end namespace internal
@@ -73,13 +73,13 @@ for operator<".
 The reason that the \c template keyword is necessary in the last example has to do with the rules for how
 templates are supposed to be compiled in C++. The compiler has to check the code for correct syntax at the
 point where the template is defined, without knowing the actual value of the template arguments (\c Derived1
-and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularPart</tt> is
+and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularView</tt> is
 a member template and that the following < symbol is part of the delimiter for the template
-parameter. Another possibility would be that <tt>dst.triangularPart</tt> is a member variable with the <
+parameter. Another possibility would be that <tt>dst.triangularView</tt> is a member variable with the <
 symbol refering to the <tt>operator<()</tt> function. In fact, the compiler should choose the second
-possibility, according to the standard. If <tt>dst.triangularPart</tt> is a member template (as in our case),
+possibility, according to the standard. If <tt>dst.triangularView</tt> is a member template (as in our case),
 the programmer should specify this explicitly with the \c template keyword and write <tt>dst.template
-triangularPart</tt>.
+triangularView</tt>.
 
 The precise rules are rather complicated, but ignoring some subtleties we can summarize them as follows:
 - A <em>dependent name</em> is name that depends (directly or indirectly) on a template parameter. In the
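Aside (not part of the patch): the construct this documentation hunk talks about looks roughly like the sketch below; the function name is invented for illustration.

#include <Eigen/Dense>

template <typename Derived1, typename Derived2>
void copyUpperTriangularPart(Eigen::MatrixBase<Derived1>& dst, const Eigen::MatrixBase<Derived2>& src)
{
  // 'template' tells the compiler that triangularView is a member template, so the
  // following '<' opens its template argument list instead of being parsed as operator<.
  dst.template triangularView<Eigen::Upper>() = src.template triangularView<Eigen::Upper>();
}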
@@ -7,8 +7,6 @@ Hello! You are seeing this webpage because your program terminated on an asserti
 my_program: path/to/eigen/Eigen/src/Core/DenseStorage.h:44:
 Eigen::internal::matrix_array<T, Size, MatrixOptions, Align>::internal::matrix_array()
 [with T = double, int Size = 2, int MatrixOptions = 2, bool Align = true]:
-Assertion `(reinterpret_cast<size_t>(array) & 0xf) == 0 && "this assertion
-is explained here: http://eigen.tuxfamily.org/dox/UnalignedArrayAssert.html
+Assertion `(reinterpret_cast<size_t>(array) & (sizemask)) == 0 && "this assertion
+is explained here: http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html
 **** READ THIS WEB PAGE !!! ****"' failed.
 </pre>
 
@@ -46,9 +46,9 @@ then you need to read this separate page: \ref TopicStructHavingEigenMembers "St
 
 Note that here, Eigen::Vector2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
 
-\section c2 Cause 2: STL Containers
+\section c2 Cause 2: STL Containers or manual memory allocation
 
-If you use STL Containers such as std::vector, std::map, ..., with Eigen objects, or with classes containing Eigen objects, like this,
+If you use STL Containers such as std::vector, std::map, ..., with %Eigen objects, or with classes containing %Eigen objects, like this,
 
 \code
 std::vector<Eigen::Matrix2f> my_vector;
@@ -60,6 +60,8 @@ then you need to read this separate page: \ref TopicStlContainers "Using STL Con
 
 Note that here, Eigen::Matrix2f is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
 
+The same issue will be exhibited by any classes/functions by-passing operator new to allocate memory, that is, by performing custom memory allocation followed by calls to the placement new operator. This is for instance typically the case of \c std::make_shared or \c std::allocate_shared for which is the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers".
+
 \section c3 Cause 3: Passing Eigen objects by value
 
 If some function in your code is getting an Eigen object passed by value, like this,
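Aside (not part of the patch): what the added paragraph recommends, sketched for one fixed-size vectorizable type. std::make_shared performs its own allocation followed by placement new, bypassing the aligned operator new that such Eigen types rely on, so an aligned allocator is passed explicitly through std::allocate_shared instead.

#include <memory>
#include <Eigen/Dense>

int main()
{
  // Both the control block and the Vector4f are allocated through
  // Eigen::aligned_allocator, preserving the alignment the vectorized code expects.
  std::shared_ptr<Eigen::Vector4f> p =
      std::allocate_shared<Eigen::Vector4f>(Eigen::aligned_allocator<Eigen::Vector4f>());
  p->setZero();
  return 0;
}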
@@ -107,7 +109,10 @@ Two possibilities:
   128-bit alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
 </ul>
 
-For more information, see <a href="http://eigen.tuxfamily.org/index.php?title=FAQ#I_disabled_vectorization.2C_but_I.27m_still_getting_annoyed_about_alignment_issues.21">this FAQ</a>.
+If you want to know why defining EIGEN_DONT_VECTORIZE does not by itself disable 128-bit alignment and the assertion, here's the explanation:
+
+It doesn't disable the assertion, because otherwise code that runs fine without vectorization would suddenly crash when enabling vectorization.
+It doesn't disable 128bit alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path.
 
 */
 
@@ -144,6 +144,22 @@ template<typename MatrixType> void product(const MatrixType& m)
     VERIFY_IS_APPROX(res.col(r).noalias() = square.adjoint() * square.col(r), (square.adjoint() * square.col(r)).eval());
     VERIFY_IS_APPROX(res.col(r).noalias() = square * square.col(r), (square * square.col(r)).eval());
 
+    // vector at runtime (see bug 1166)
+    {
+      RowSquareMatrixType ref(square);
+      ColSquareMatrixType ref2(square2);
+      ref = res = square;
+      VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.col(0).transpose() * square.transpose(), (ref.row(0) = m1.col(0).transpose() * square.transpose()));
+      VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.block(0,0,rows,1).transpose() * square.transpose(), (ref.row(0) = m1.col(0).transpose() * square.transpose()));
+      VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.col(0).transpose() * square, (ref.row(0) = m1.col(0).transpose() * square));
+      VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.block(0,0,rows,1).transpose() * square, (ref.row(0) = m1.col(0).transpose() * square));
+      ref2 = res2 = square2;
+      VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.row(0) * square2.transpose(), (ref2.row(0) = m1.row(0) * square2.transpose()));
+      VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.block(0,0,1,cols) * square2.transpose(), (ref2.row(0) = m1.row(0) * square2.transpose()));
+      VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.row(0) * square2, (ref2.row(0) = m1.row(0) * square2));
+      VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.block(0,0,1,cols) * square2, (ref2.row(0) = m1.row(0) * square2));
+    }
+
     // inner product
     {
       Scalar x = square2.row(c) * square2.col(c2);
@@ -21,8 +21,8 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
 
   const Index rows = ref.rows();
   const Index cols = ref.cols();
-  const Index inner = ref.innerSize();
-  const Index outer = ref.outerSize();
+  //const Index inner = ref.innerSize();
+  //const Index outer = ref.outerSize();
 
   typedef typename SparseMatrixType::Scalar Scalar;
   enum { Flags = SparseMatrixType::Flags };
@@ -327,7 +327,6 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
     m3 = m2.template triangularView<Upper>();
     VERIFY_IS_APPROX(m3, refMat3);
 
-    if(inner>=outer) // FIXME this should be implemented for outer>inner as well
     {
       refMat3 = refMat2.template triangularView<UnitUpper>();
       m3 = m2.template triangularView<UnitUpper>();
@@ -48,7 +48,6 @@ void check_stddeque_matrix(const MatrixType& m)
   VERIFY_IS_APPROX(v[21], y);
   v.push_back(x);
   VERIFY_IS_APPROX(v[22], x);
-  VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(MatrixType));
 
   // do a lot of push_back such that the deque gets internally resized
   // (with memory reallocation)
@@ -170,6 +170,12 @@ class TensorBase<Derived, ReadOnlyAccessors>
       return unaryExpr(internal::scalar_abs_op<Scalar>());
     }
 
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>
+    conjugate() const {
+      return unaryExpr(internal::scalar_conjugate_op<Scalar>());
+    }
+
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_pow_op<Scalar>, const Derived>
     pow(Scalar exponent) const {
@@ -7,8 +7,6 @@
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-// TODO(mdevin): Free the cuda memory.
-
 #define EIGEN_TEST_NO_LONGDOUBLE
 #define EIGEN_TEST_NO_COMPLEX
 #define EIGEN_TEST_FUNC cxx11_tensor_cuda
@@ -48,6 +48,25 @@ static void test_abs()
 }
 
 
+static void test_conjugate()
+{
+  Tensor<std::complex<float>, 1> data1(3);
+  Tensor<std::complex<double>, 1> data2(3);
+  Tensor<int, 1> data3(3);
+  data1.setRandom();
+  data2.setRandom();
+  data3.setRandom();
+
+  Tensor<std::complex<float>, 1> conj1 = data1.conjugate();
+  Tensor<std::complex<double>, 1> conj2 = data2.conjugate();
+  Tensor<int, 1> conj3 = data3.conjugate();
+  for (int i = 0; i < 3; ++i) {
+    VERIFY_IS_APPROX(conj1(i), std::conj(data1(i)));
+    VERIFY_IS_APPROX(conj2(i), std::conj(data2(i)));
+    VERIFY_IS_APPROX(conj3(i), data3(i));
+  }
+}
+
 static void test_contractions()
 {
   Tensor<std::complex<float>, 4> t_left(30, 50, 8, 31);
@@ -77,5 +96,6 @@ void test_cxx11_tensor_of_complex()
 {
   CALL_SUBTEST(test_additions());
   CALL_SUBTEST(test_abs());
+  CALL_SUBTEST(test_conjugate());
   CALL_SUBTEST(test_contractions());
 }