Merged the latest version of the code from eigen/eigen

Benoit Steiner 2014-02-18 18:51:24 -08:00
commit cbd7e98174
37 changed files with 504 additions and 223 deletions


@ -4,14 +4,10 @@
## # The following are required to use Dart and the CDash dashboard
## ENABLE_TESTING()
## INCLUDE(CTest)
set(CTEST_PROJECT_NAME "Eigen")
set(CTEST_PROJECT_NAME "Eigen3.2")
set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")
set(CTEST_DROP_METHOD "http")
set(CTEST_DROP_SITE "manao.inria.fr")
set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen")
set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen3.2")
set(CTEST_DROP_SITE_CDASH TRUE)
set(CTEST_PROJECT_SUBPROJECTS
Official
Unsupported
)


@ -16,7 +16,10 @@
namespace Eigen {
namespace internal {
template<typename MatrixType, int UpLo> struct LDLT_Traits;
// PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };
}
/** \ingroup Cholesky_Module
@ -69,7 +72,12 @@ template<typename _MatrixType, int _UpLo> class LDLT
* The default constructor is useful in cases in which the user intends to
* perform decompositions via LDLT::compute(const MatrixType&).
*/
LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
LDLT()
: m_matrix(),
m_transpositions(),
m_sign(internal::ZeroSign),
m_isInitialized(false)
{}
/** \brief Default Constructor with memory preallocation
*
@ -81,6 +89,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
: m_matrix(size, size),
m_transpositions(size),
m_temporary(size),
m_sign(internal::ZeroSign),
m_isInitialized(false)
{}
@ -93,6 +102,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
: m_matrix(matrix.rows(), matrix.cols()),
m_transpositions(matrix.rows()),
m_temporary(matrix.rows()),
m_sign(internal::ZeroSign),
m_isInitialized(false)
{
compute(matrix);
@ -139,7 +149,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
inline bool isPositive() const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
return m_sign == 1;
return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;
}
#ifdef EIGEN2_SUPPORT
@ -153,7 +163,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
inline bool isNegative(void) const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
return m_sign == -1;
return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
}
/** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
@ -235,7 +245,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
MatrixType m_matrix;
TranspositionType m_transpositions;
TmpMatrixType m_temporary;
int m_sign;
internal::SignMatrix m_sign;
bool m_isInitialized;
};
@ -246,7 +256,7 @@ template<int UpLo> struct ldlt_inplace;
template<> struct ldlt_inplace<Lower>
{
template<typename MatrixType, typename TranspositionType, typename Workspace>
static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
{
using std::abs;
typedef typename MatrixType::Scalar Scalar;
@ -258,8 +268,9 @@ template<> struct ldlt_inplace<Lower>
if (size <= 1)
{
transpositions.setIdentity();
if(sign)
*sign = numext::real(mat.coeff(0,0))>0 ? 1:-1;
if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef;
else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef;
else sign = ZeroSign;
return true;
}
@ -284,7 +295,6 @@ template<> struct ldlt_inplace<Lower>
if(biggest_in_corner < cutoff)
{
for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
if(sign) *sign = 0;
break;
}
@ -325,15 +335,15 @@ template<> struct ldlt_inplace<Lower>
}
if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
A21 /= mat.coeffRef(k,k);
if(sign)
{
// LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right
int newSign = numext::real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0;
if(k == 0)
*sign = newSign;
else if(*sign != newSign)
*sign = 0;
RealScalar realAkk = numext::real(mat.coeffRef(k,k));
if (sign == PositiveSemiDef) {
if (realAkk < 0) sign = Indefinite;
} else if (sign == NegativeSemiDef) {
if (realAkk > 0) sign = Indefinite;
} else if (sign == ZeroSign) {
if (realAkk > 0) sign = PositiveSemiDef;
else if (realAkk < 0) sign = NegativeSemiDef;
}
}
@ -399,7 +409,7 @@ template<> struct ldlt_inplace<Lower>
template<> struct ldlt_inplace<Upper>
{
template<typename MatrixType, typename TranspositionType, typename Workspace>
static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
{
Transpose<MatrixType> matt(mat);
return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
@ -445,7 +455,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
m_isInitialized = false;
m_temporary.resize(size);
internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
m_isInitialized = true;
return *this;
@ -473,7 +483,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
for (Index i = 0; i < size; i++)
m_transpositions.coeffRef(i) = i;
m_temporary.resize(size);
m_sign = sigma>=0 ? 1 : -1;
m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
m_isInitialized = true;
}

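The switch from a raw int sign to the internal::SignMatrix enum lets LDLT distinguish semi-definite and all-zero inputs from truly indefinite ones, which is what the updated isPositive()/isNegative() now report. A standalone sketch of the new semantics (mirroring the cholesky_definiteness test further down):

#include <Eigen/Cholesky>
#include <iostream>

int main()
{
  Eigen::Matrix2d mat;
  mat << 0, 0,
         0, 1;                                 // positive semi-definite, singular
  Eigen::LDLT<Eigen::Matrix2d> ldlt(mat);
  std::cout << ldlt.isPositive() << " " << ldlt.isNegative() << "\n";  // 1 0

  mat.setZero();                               // the zero matrix counts as both signs
  ldlt.compute(mat);
  std::cout << ldlt.isPositive() << " " << ldlt.isNegative() << "\n";  // 1 1
}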

@ -141,36 +141,6 @@ Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
return derived();
}
/** replaces \c *this by \c *this * \a other.
*
* \returns a reference to \c *this
*/
template<typename Derived>
template<typename OtherDerived>
inline Derived&
MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
return derived();
}
/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
*/
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
}
/** replaces \c *this by \c *this * \a other. */
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheLeft(derived());
}
} // end namespace Eigen
#endif // EIGEN_EIGENBASE_H


@ -564,6 +564,51 @@ template<typename Derived> class MatrixBase
{EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
};
/***************************************************************************
* Implementation of matrix base methods
***************************************************************************/
/** replaces \c *this by \c *this * \a other.
*
* \returns a reference to \c *this
*
* Example: \include MatrixBase_applyOnTheRight.cpp
* Output: \verbinclude MatrixBase_applyOnTheRight.out
*/
template<typename Derived>
template<typename OtherDerived>
inline Derived&
MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
return derived();
}
/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
*
* Example: \include MatrixBase_applyOnTheRight.cpp
* Output: \verbinclude MatrixBase_applyOnTheRight.out
*/
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
}
/** replaces \c *this by \a other * \c *this.
*
* Example: \include MatrixBase_applyOnTheLeft.cpp
* Output: \verbinclude MatrixBase_applyOnTheLeft.out
*/
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheLeft(derived());
}
} // end namespace Eigen
#endif // EIGEN_MATRIXBASE_H


@ -17,16 +17,29 @@ namespace internal {
template<typename ExpressionType, typename Scalar>
inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
{
Scalar max = bl.cwiseAbs().maxCoeff();
if (max>scale)
using std::max;
Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
if (maxCoeff>scale)
{
ssq = ssq * numext::abs2(scale/max);
scale = max;
invScale = Scalar(1)/scale;
ssq = ssq * numext::abs2(scale/maxCoeff);
Scalar tmp = Scalar(1)/maxCoeff;
if(tmp > NumTraits<Scalar>::highest())
{
invScale = NumTraits<Scalar>::highest();
scale = Scalar(1)/invScale;
}
else
{
scale = maxCoeff;
invScale = tmp;
}
}
// TODO if the max is much much smaller than the current scale,
// TODO if the maxCoeff is much much smaller than the current scale,
// then we can neglect this sub vector
ssq += (bl*invScale).squaredNorm();
if(scale>Scalar(0)) // if scale==0, then bl is 0
ssq += (bl*invScale).squaredNorm();
}
template<typename Derived>

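The new branch in stable_norm_kernel guards the reciprocal: when the largest coefficient is denormal, Scalar(1)/maxCoeff overflows to infinity, and an infinite invScale would poison the accumulated ssq. A standalone sketch of that failure mode:

#include <iostream>
#include <limits>

int main()
{
  float maxCoeff = std::numeric_limits<float>::denorm_min();       // tiniest positive float
  float tmp = 1.0f / maxCoeff;                                     // overflows to +inf
  std::cout << (tmp > std::numeric_limits<float>::max()) << "\n";  // 1: the guard fires
  float invScale = std::numeric_limits<float>::max();              // clamped, as in the patch
  float scale = 1.0f / invScale;                                   // tiny but finite
  std::cout << scale << " " << invScale << "\n";
}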

@ -608,7 +608,7 @@ template<typename T> class aligned_stack_memory_handler
*/
#ifdef EIGEN_ALLOCA
// The native alloca() that comes with llvm aligns the buffer on 16 bytes even when AVX is enabled.
#if defined(__arm__) || EIGEN_ALIGN_BYTES > 16
#if defined(__arm__) || defined(_WIN32) || EIGEN_ALIGN_BYTES > 16
#define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+EIGEN_ALIGN_BYTES)) & ~(size_t(EIGEN_ALIGN_BYTES-1))) + EIGEN_ALIGN_BYTES)
#else
#define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
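Adding _WIN32 to this branch assumes MSVC's _alloca guarantees no more than 16-byte alignment, so the buffer must be over-aligned by hand. The pointer arithmetic of EIGEN_ALIGNED_ALLOCA in isolation (a sketch with a stack array standing in for alloca):

#include <cstdint>
#include <cstdio>

int main()
{
  const std::uintptr_t align = 32;             // stand-in for EIGEN_ALIGN_BYTES
  unsigned char raw[128 + align];              // stand-in for EIGEN_ALLOCA(SIZE + align)
  std::uintptr_t p = reinterpret_cast<std::uintptr_t>(raw);
  // Round down to a 32-byte boundary, then advance by 32: the result is
  // aligned and still inside the over-allocated buffer.
  void* aligned = reinterpret_cast<void*>((p & ~(align - 1)) + align);
  std::printf("%p -> %p\n", static_cast<void*>(raw), aligned);
}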
@ -761,11 +761,27 @@ public:
::new( p ) T( value );
}
#if (__cplusplus >= 201103L)
template <typename U, typename... Args>
void construct( U* u, Args&&... args)
{
::new( static_cast<void*>(u) ) U( std::forward<Args>( args )... );
}
#endif
void destroy( pointer p )
{
p->~T();
}
#if (__cplusplus >= 201103L)
template <typename U>
void destroy( U* u )
{
u->~U();
}
#endif
void deallocate( pointer p, size_type /*num*/ )
{
internal::aligned_free( p );

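The variadic construct()/destroy() overloads are what C++11 containers reach through std::allocator_traits when emplacing elements, so aligned_allocator now works with emplace_back. A usage sketch (assuming a C++11 compiler):

#include <Eigen/Core>
#include <iostream>
#include <vector>

int main()
{
  std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > v;
  v.emplace_back(1.f, 2.f, 3.f, 4.f);   // routes through construct(U*, Args&&...)
  std::cout << v[0].transpose() << "\n";
}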

@ -530,9 +530,9 @@ public:
inline Transform& operator=(const UniformScaling<Scalar>& t);
inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry)> operator*(const UniformScaling<Scalar>& s) const
inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator*(const UniformScaling<Scalar>& s) const
{
Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry),Options> res = *this;
Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode),Options> res = *this;
res.scale(s.factor());
return res;
}
@ -699,9 +699,13 @@ template<typename Scalar, int Dim, int Mode,int Options>
Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QMatrix& other)
{
EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
m_matrix << other.m11(), other.m21(), other.dx(),
other.m12(), other.m22(), other.dy(),
0, 0, 1;
if (Mode == int(AffineCompact))
m_matrix << other.m11(), other.m21(), other.dx(),
other.m12(), other.m22(), other.dy();
else
m_matrix << other.m11(), other.m21(), other.dx(),
other.m12(), other.m22(), other.dy(),
0, 0, 1;
return *this;
}

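Before this fix, multiplying by a UniformScaling produced an Isometry-mode result for every non-Isometry input, which is wrong semantically and loses the compact storage of AffineCompact; the QMatrix assignment had the complementary problem of writing a bottom row that AffineCompact does not store. A sketch of the mode now being preserved:

#include <Eigen/Geometry>

int main()
{
  Eigen::Transform<float, 3, Eigen::AffineCompact> t;
  t.setIdentity();
  // The product keeps Mode == AffineCompact; an Isometry input still decays
  // to Affine, since uniform scaling breaks the isometry invariant.
  Eigen::Transform<float, 3, Eigen::AffineCompact> r = t * Eigen::Scaling(2.f);
  return r.matrix().rows() == 3 ? 0 : 1;       // compact storage: a 3x4 matrix
}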

@ -66,9 +66,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
}
// unordered insertion
for(int k=0; k<nnz; ++k)
for(Index k=0; k<nnz; ++k)
{
int i = indices[k];
Index i = indices[k];
res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
@ -76,8 +76,8 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
#if 0
// alternative ordered insertion code:
int t200 = rows/(log2(200)*1.39);
int t = (rows*100)/139;
Index t200 = rows/(log2(200)*1.39);
Index t = (rows*100)/139;
// FIXME reserve nnz non zeros
// FIXME implement fast sort algorithms for very small nnz
@ -90,9 +90,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
if(true)
{
if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
for(int k=0; k<nnz; ++k)
for(Index k=0; k<nnz; ++k)
{
int i = indices[k];
Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
mask[i] = false;
}
@ -100,7 +100,7 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
else
{
// dense path
for(int i=0; i<rows; ++i)
for(Index i=0; i<rows; ++i)
{
if(mask[i])
{
@ -134,8 +134,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
// sort the non zeros:
@ -149,7 +149,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix rhsRow = rhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
@ -162,7 +162,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix lhsRow = lhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
@ -175,7 +175,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
res = resRow;
@ -190,7 +190,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
res = resCol;
@ -202,7 +202,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix lhsCol = lhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
@ -215,7 +215,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix rhsCol = rhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
@ -228,8 +228,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
RowMajorMatrix resRow(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
// sort the non zeros:

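These hunks consistently replace int loop counters and int-indexed temporaries with the expression's Index type, so products between sparse matrices with a non-default index type no longer truncate indices in their internal temporaries. A sketch of what this enables (long indices, as exercised by the new sparse_product subtest):

#include <Eigen/SparseCore>

int main()
{
  typedef Eigen::SparseMatrix<double, Eigen::ColMajor, long> SpMat;
  SpMat A(1000, 1000), B(1000, 1000);
  A.insert(0, 0) = 1.0;
  B.insert(0, 0) = 2.0;
  SpMat C = A * B;   // internal temporaries now carry long indices too
  return C.nonZeros() == 1 ? 0 : 1;
}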

@ -335,6 +335,14 @@ const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::inner
}
namespace internal {
template< typename XprType, int BlockRows, int BlockCols, bool InnerPanel,
bool OuterVector = (BlockCols==1 && XprType::IsRowMajor) || (BlockRows==1 && !XprType::IsRowMajor)>
class GenericSparseBlockInnerIteratorImpl;
}
/** Generic implementation of sparse Block expression.
* Read-only.
*/
@ -342,11 +350,12 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
: public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
/** Column or Row constructor
*/
@ -354,8 +363,8 @@ public:
: m_matrix(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
m_blockRows(xpr.rows()),
m_blockCols(xpr.cols())
m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
/** Dynamic-size constructor
@ -394,29 +403,8 @@ public:
inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
class InnerIterator : public _MatrixTypeNested::InnerIterator
{
typedef typename _MatrixTypeNested::InnerIterator Base;
const BlockType& m_block;
Index m_end;
public:
EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer)
: Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
m_block(block),
m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
{
while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) )
Base::operator++();
}
inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
inline Index row() const { return Base::row() - m_block.m_startRow.value(); }
inline Index col() const { return Base::col() - m_block.m_startCol.value(); }
inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
};
typedef internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel> InnerIterator;
class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator
{
typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
@ -441,7 +429,7 @@ public:
inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; }
};
protected:
friend class InnerIterator;
friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
friend class ReverseInnerIterator;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
@ -454,6 +442,100 @@ public:
};
namespace internal {
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel,false> : public Block<XprType, BlockRows, BlockCols, InnerPanel>::_MatrixTypeNested::InnerIterator
{
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
enum {
IsRowMajor = BlockType::IsRowMajor
};
typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested;
typedef typename BlockType::Index Index;
typedef typename _MatrixTypeNested::InnerIterator Base;
const BlockType& m_block;
Index m_end;
public:
EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer)
: Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
m_block(block),
m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
{
while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) )
Base::operator++();
}
inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
inline Index row() const { return Base::row() - m_block.m_startRow.value(); }
inline Index col() const { return Base::col() - m_block.m_startCol.value(); }
inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
};
// Row vector of a column-major sparse matrix or column of a row-major one.
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel,true>
{
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
enum {
IsRowMajor = BlockType::IsRowMajor
};
typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested;
typedef typename BlockType::Index Index;
typedef typename BlockType::Scalar Scalar;
const BlockType& m_block;
Index m_outerPos;
Index m_innerIndex;
Scalar m_value;
Index m_end;
public:
EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer = 0)
:
m_block(block),
m_outerPos( (IsRowMajor ? block.m_startCol.value() : block.m_startRow.value()) - 1), // -1 so that operator++ finds the first non-zero entry
m_innerIndex(IsRowMajor ? block.m_startRow.value() : block.m_startCol.value()),
m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
++(*this);
}
inline Index index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
inline Index outer() const { return 0; }
inline Index row() const { return IsRowMajor ? 0 : index(); }
inline Index col() const { return IsRowMajor ? index() : 0; }
inline Scalar value() const { return m_value; }
inline GenericSparseBlockInnerIteratorImpl& operator++()
{
// search next non-zero entry
while(m_outerPos<m_end)
{
m_outerPos++;
typename XprType::InnerIterator it(m_block.m_matrix, m_outerPos);
// search for the key m_innerIndex in the current outer-vector
while(it && it.index() < m_innerIndex) ++it;
if(it && it.index()==m_innerIndex)
{
m_value = it.value();
break;
}
}
return *this;
}
inline operator bool() const { return m_outerPos < m_end; }
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSE_BLOCK_H

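The OuterVector == true specialization covers blocks whose coefficients are scattered across inner vectors — a row of a column-major matrix, or a column of a row-major one — which the previous single-inner-vector iterator could not traverse. A sketch exercising it through the dot products added to sparse_basic:

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> m(3, 3);   // column-major storage
  m.insert(1, 0) = 1.0;
  m.insert(1, 2) = 3.0;
  Eigen::VectorXd v(3);
  v << 1.0, 2.0, 3.0;
  // row(1) is an outer-vector block: each coefficient lives in a different
  // column, so the iterator probes every outer vector for inner index 1.
  std::cout << m.row(1).dot(v) << "\n";  // 1*1 + 3*3 = 10
}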

@ -125,7 +125,7 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes
inline Scalar value() const { return Base::value() * m_factor; }
protected:
int m_outer;
Index m_outer;
Scalar m_factor;
};
@ -156,7 +156,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
{
for(Index c=0; c<rhs.cols(); ++c)
{
int n = lhs.outerSize();
Index n = lhs.outerSize();
for(Index j=0; j<n; ++j)
{
typename Res::Scalar tmp(0);


@ -402,7 +402,7 @@ class SparseMatrix
* \sa insertBack, insertBackByOuterInner */
inline void startVec(Index outer)
{
eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
m_outerIndex[outer+1] = m_outerIndex[outer];
}
@ -480,7 +480,7 @@ class SparseMatrix
if(m_innerNonZeros != 0)
return;
m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
for (int i = 0; i < m_outerSize; i++)
for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
@ -752,8 +752,8 @@ class SparseMatrix
else
for (Index i=0; i<m.outerSize(); ++i)
{
int p = m.m_outerIndex[i];
int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index p = m.m_outerIndex[i];
Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
for (; k<pe; ++k)
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
@ -1022,7 +1022,7 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
wi.fill(-1);
Index count = 0;
// for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
for(int j=0; j<outerSize(); ++j)
for(Index j=0; j<outerSize(); ++j)
{
Index start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];


@ -302,8 +302,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
}
else
{
SparseMatrix<Scalar, RowMajorBit> trans = m;
s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit> >&>(trans);
SparseMatrix<Scalar, RowMajorBit, Index> trans = m;
s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, Index> >&>(trans);
}
}
return s;


@ -16,6 +16,7 @@ template<typename Lhs, typename Rhs>
struct SparseSparseProductReturnType
{
typedef typename internal::traits<Lhs>::Scalar Scalar;
typedef typename internal::traits<Lhs>::Index Index;
enum {
LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
@ -24,11 +25,11 @@ struct SparseSparseProductReturnType
};
typedef typename internal::conditional<TransposeLhs,
SparseMatrix<Scalar,0>,
SparseMatrix<Scalar,0,Index>,
typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
typedef typename internal::conditional<TransposeRhs,
SparseMatrix<Scalar,0>,
SparseMatrix<Scalar,0,Index>,
typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
typedef SparseSparseProduct<LhsNested, RhsNested> Type;


@ -27,7 +27,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
//int size = lhs.outerSize();
//Index size = lhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
@ -100,7 +100,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// we need a col-major matrix to hold the result
typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> SparseTemporaryType;
SparseTemporaryType _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
res = _res;
@ -126,10 +126,11 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
ColMajorMatrix colLhs(lhs);
ColMajorMatrix colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance);
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs;
ColMajorMatrixLhs colLhs(lhs);
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
// let's transpose the product to get a column x column product
// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;


@ -70,7 +70,7 @@ Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index
if(num_expansions == 0 || keep_prev)
new_len = length; // First time: allocate the requested length
else
new_len = Index(alpha * length);
new_len = (std::max)(length+1,Index(alpha * length));
VectorType old_vec; // Temporary vector to hold the previous values
if (nbElts > 0 )
@ -107,7 +107,7 @@ Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index
do
{
alpha = (alpha + 1)/2;
new_len = Index(alpha * length);
new_len = (std::max)(length+1,Index(alpha * length));
#ifdef EIGEN_EXCEPTIONS
try
#endif

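Without the max(), integer truncation lets Index(alpha * length) come out equal to length (or zero when length is zero), so the buffer never grows and expand() can loop without making progress. The arithmetic in isolation:

#include <algorithm>
#include <iostream>

int main()
{
  typedef long Index;
  const double alpha = 1.5;
  for (Index length = 0; length <= 1; ++length)
  {
    Index old_len = Index(alpha * length);                          // 0 and 1: no growth
    Index new_len = (std::max)(length + 1, Index(alpha * length));  // 1 and 2: grows
    std::cout << length << ": " << old_len << " vs " << new_len << "\n";
  }
}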

@ -32,25 +32,25 @@ EIGEN_DONT_INLINE typename T::Scalar lapackNorm(T& v)
Scalar ssq = 1;
for (int i=0;i<n;++i)
{
Scalar ax = internal::abs(v.coeff(i));
Scalar ax = std::abs(v.coeff(i));
if (scale >= ax)
{
ssq += internal::abs2(ax/scale);
ssq += numext::abs2(ax/scale);
}
else
{
ssq = Scalar(1) + ssq * internal::abs2(scale/ax);
ssq = Scalar(1) + ssq * numext::abs2(scale/ax);
scale = ax;
}
}
return scale * internal::sqrt(ssq);
return scale * std::sqrt(ssq);
}
template<typename T>
EIGEN_DONT_INLINE typename T::Scalar twopassNorm(T& v)
{
typedef typename T::Scalar Scalar;
Scalar s = v.cwise().abs().maxCoeff();
Scalar s = v.array().abs().maxCoeff();
return s*(v/s).norm();
}
@ -73,16 +73,20 @@ EIGEN_DONT_INLINE typename T::Scalar divacNorm(T& v)
v(i) = v(2*i) + v(2*i+1);
n = n/2;
}
return internal::sqrt(v(0));
return std::sqrt(v(0));
}
namespace Eigen {
namespace internal {
#ifdef EIGEN_VECTORIZE
Packet4f internal::plt(const Packet4f& a, Packet4f& b) { return _mm_cmplt_ps(a,b); }
Packet2d internal::plt(const Packet2d& a, Packet2d& b) { return _mm_cmplt_pd(a,b); }
Packet4f plt(const Packet4f& a, Packet4f& b) { return _mm_cmplt_ps(a,b); }
Packet2d plt(const Packet2d& a, Packet2d& b) { return _mm_cmplt_pd(a,b); }
Packet4f internal::pandnot(const Packet4f& a, Packet4f& b) { return _mm_andnot_ps(a,b); }
Packet2d internal::pandnot(const Packet2d& a, Packet2d& b) { return _mm_andnot_pd(a,b); }
Packet4f pandnot(const Packet4f& a, Packet4f& b) { return _mm_andnot_ps(a,b); }
Packet2d pandnot(const Packet2d& a, Packet2d& b) { return _mm_andnot_pd(a,b); }
#endif
}
}
template<typename T>
EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
@ -126,7 +130,7 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
overfl = rbig*s2m; // overflow boundary for abig
eps = std::pow(ibeta, 1-it);
relerr = internal::sqrt(eps); // tolerance for neglecting asml
relerr = std::sqrt(eps); // tolerance for neglecting asml
abig = 1.0/eps - 1.0;
if (Scalar(nbig)>abig) nmax = abig; // largest safe n
else nmax = nbig;
@ -134,13 +138,13 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
typedef typename internal::packet_traits<Scalar>::type Packet;
const int ps = internal::packet_traits<Scalar>::size;
Packet pasml = internal::pset1(Scalar(0));
Packet pamed = internal::pset1(Scalar(0));
Packet pabig = internal::pset1(Scalar(0));
Packet ps2m = internal::pset1(s2m);
Packet ps1m = internal::pset1(s1m);
Packet pb2 = internal::pset1(b2);
Packet pb1 = internal::pset1(b1);
Packet pasml = internal::pset1<Packet>(Scalar(0));
Packet pamed = internal::pset1<Packet>(Scalar(0));
Packet pabig = internal::pset1<Packet>(Scalar(0));
Packet ps2m = internal::pset1<Packet>(s2m);
Packet ps1m = internal::pset1<Packet>(s1m);
Packet pb2 = internal::pset1<Packet>(b2);
Packet pb1 = internal::pset1<Packet>(b1);
for(int j=0; j<v.size(); j+=ps)
{
Packet ax = internal::pabs(v.template packet<Aligned>(j));
@ -170,7 +174,7 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
Scalar amed = internal::predux(pamed);
if(abig > Scalar(0))
{
abig = internal::sqrt(abig);
abig = std::sqrt(abig);
if(abig > overfl)
{
eigen_assert(false && "overflow");
@ -179,7 +183,7 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
if(amed > Scalar(0))
{
abig = abig/s2m;
amed = internal::sqrt(amed);
amed = std::sqrt(amed);
}
else
{
@ -191,24 +195,24 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
{
if (amed > Scalar(0))
{
abig = internal::sqrt(amed);
amed = internal::sqrt(asml) / s1m;
abig = std::sqrt(amed);
amed = std::sqrt(asml) / s1m;
}
else
{
return internal::sqrt(asml)/s1m;
return std::sqrt(asml)/s1m;
}
}
else
{
return internal::sqrt(amed);
return std::sqrt(amed);
}
asml = std::min(abig, amed);
abig = std::max(abig, amed);
if(asml <= abig*relerr)
return abig;
else
return abig * internal::sqrt(Scalar(1) + internal::abs2(asml/abig));
return abig * std::sqrt(Scalar(1) + numext::abs2(asml/abig));
#endif
}
@ -224,22 +228,22 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
for (int i=0; i<iters; ++i) NRM(vd); \
td.stop(); \
} \
for (int k=0; k<std::max(1,tries/3); ++k) { \
/*for (int k=0; k<std::max(1,tries/3); ++k) { \
tcf.start(); \
for (int i=0; i<iters; ++i) NRM(vcf); \
tcf.stop(); \
} \
} */\
std::cout << #NRM << "\t" << tf.value() << " " << td.value() << " " << tcf.value() << "\n"; \
}
void check_accuracy(double basef, double based, int s)
{
double yf = basef * internal::abs(internal::random<double>());
double yd = based * internal::abs(internal::random<double>());
double yf = basef * std::abs(internal::random<double>());
double yd = based * std::abs(internal::random<double>());
VectorXf vf = VectorXf::Ones(s) * yf;
VectorXd vd = VectorXd::Ones(s) * yd;
std::cout << "reference\t" << internal::sqrt(double(s))*yf << "\t" << internal::sqrt(double(s))*yd << "\n";
std::cout << "reference\t" << std::sqrt(double(s))*yf << "\t" << std::sqrt(double(s))*yd << "\n";
std::cout << "sqsumNorm\t" << sqsumNorm(vf) << "\t" << sqsumNorm(vd) << "\n";
std::cout << "hypotNorm\t" << hypotNorm(vf) << "\t" << hypotNorm(vd) << "\n";
std::cout << "blueNorm\t" << blueNorm(vf) << "\t" << blueNorm(vd) << "\n";
@ -255,8 +259,8 @@ void check_accuracy_var(int ef0, int ef1, int ed0, int ed1, int s)
VectorXd vd(s);
for (int i=0; i<s; ++i)
{
vf[i] = internal::abs(internal::random<double>()) * std::pow(double(10), internal::random<int>(ef0,ef1));
vd[i] = internal::abs(internal::random<double>()) * std::pow(double(10), internal::random<int>(ed0,ed1));
vf[i] = std::abs(internal::random<double>()) * std::pow(double(10), internal::random<int>(ef0,ef1));
vd[i] = std::abs(internal::random<double>()) * std::pow(double(10), internal::random<int>(ed0,ed1));
}
//std::cout << "reference\t" << internal::sqrt(double(s))*yf << "\t" << internal::sqrt(double(s))*yd << "\n";
@ -321,10 +325,10 @@ int main(int argc, char** argv)
VectorXcf vcf = VectorXcf::Random(1024*1024*32) * y;
BENCH_PERF(sqsumNorm);
BENCH_PERF(blueNorm);
// BENCH_PERF(pblueNorm);
// BENCH_PERF(lapackNorm);
// BENCH_PERF(hypotNorm);
// BENCH_PERF(twopassNorm);
BENCH_PERF(pblueNorm);
BENCH_PERF(lapackNorm);
BENCH_PERF(hypotNorm);
BENCH_PERF(twopassNorm);
BENCH_PERF(bl2passNorm);
}
@ -336,10 +340,10 @@ int main(int argc, char** argv)
VectorXcf vcf = VectorXcf::Random(512) * y;
BENCH_PERF(sqsumNorm);
BENCH_PERF(blueNorm);
// BENCH_PERF(pblueNorm);
// BENCH_PERF(lapackNorm);
// BENCH_PERF(hypotNorm);
// BENCH_PERF(twopassNorm);
BENCH_PERF(pblueNorm);
BENCH_PERF(lapackNorm);
BENCH_PERF(hypotNorm);
BENCH_PERF(twopassNorm);
BENCH_PERF(bl2passNorm);
}
}


@ -7,6 +7,9 @@ workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS)
if(EIGEN_Fortran_COMPILER_WORKS)
enable_language(Fortran OPTIONAL)
if(NOT CMAKE_Fortran_COMPILER)
set(EIGEN_Fortran_COMPILER_WORKS OFF)
endif()
endif()
add_custom_target(blas)


@ -23,7 +23,7 @@ function(workaround_9220 language language_works)
#message("DEBUG: language = ${language}")
set(text
"project(test NONE)
cmake_minimum_required(VERSION 2.6.0)
cmake_minimum_required(VERSION 2.8.0)
set (CMAKE_Fortran_FLAGS \"${CMAKE_Fortran_FLAGS}\")
set (CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS}\")
enable_language(${language} OPTIONAL)


@ -163,13 +163,25 @@ x.squaredNorm() // dot(x, x) Note the equivalence is not true for co
x.dot(y) // dot(x, y)
x.cross(y) // cross(x, y) Requires #include <Eigen/Geometry>
//// Type conversion
// Eigen // Matlab
A.cast<double>(); // double(A)
A.cast<float>(); // single(A)
A.cast<int>(); // int32(A)
// if the original type equals destination type, no work is done
// Note that for most operations Eigen requires all operands to have the same type:
MatrixXf F = MatrixXf::Zero(3,3);
A += F; // illegal in Eigen. In Matlab A = A+F is allowed
A += F.cast<double>(); // F converted to double and then added (generally, conversion happens on-the-fly)
// Eigen can map existing memory into Eigen matrices.
float array[3];
Map<Vector3f>(array, 3).fill(10);
int data[4] = 1, 2, 3, 4;
Matrix2i mat2x2(data);
MatrixXi mat2x2 = Map<Matrix2i>(data);
MatrixXi mat2x2 = Map<MatrixXi>(data, 2, 2);
Vector3f::Map(array).fill(10); // create a temporary Map over array and set its entries to 10
int data[4] = {1, 2, 3, 4};
Matrix2i mat2x2(data); // copies data into mat2x2
Matrix2i::Map(data) = 2*mat2x2; // overwrite elements of data with 2*mat2x2
MatrixXi::Map(data, 2, 2) += mat2x2; // adds mat2x2 to elements of data (alternative syntax if size is not known at compile time)
// Solve Ax = b. Result stored in x. Matlab: x = A \ b.
x = A.ldlt().solve(b); // A sym. p.s.d. #include <Eigen/Cholesky>


@ -0,0 +1,7 @@
Matrix3f A = Matrix3f::Random(3,3), B;
B << 0,1,0,
0,0,1,
1,0,0;
cout << "At start, A = " << endl << A << endl;
A.applyOnTheLeft(B);
cout << "After applyOnTheLeft, A = " << endl << A << endl;


@ -0,0 +1,9 @@
Matrix3f A = Matrix3f::Random(3,3), B;
B << 0,1,0,
0,0,1,
1,0,0;
cout << "At start, A = " << endl << A << endl;
A *= B;
cout << "After A *= B, A = " << endl << A << endl;
A.applyOnTheRight(B); // equivalent to A *= B
cout << "After applyOnTheRight, A = " << endl << A << endl;


@ -7,6 +7,9 @@ workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS)
if(EIGEN_Fortran_COMPILER_WORKS)
enable_language(Fortran OPTIONAL)
if(NOT CMAKE_Fortran_COMPILER)
set(EIGEN_Fortran_COMPILER_WORKS OFF)
endif()
endif()
add_custom_target(lapack)


@ -263,8 +263,8 @@ template<typename MatrixType> void cholesky_bug241(const MatrixType& m)
// LDLT is not guaranteed to work for indefinite matrices, but happens to work fine if the matrix is diagonal.
// This test checks that LDLT reports correctly that matrix is indefinite.
// See http://forum.kde.org/viewtopic.php?f=74&t=106942
template<typename MatrixType> void cholesky_indefinite(const MatrixType& m)
// See http://forum.kde.org/viewtopic.php?f=74&t=106942 and bug 736
template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
{
eigen_assert(m.rows() == 2 && m.cols() == 2);
MatrixType mat;
@ -280,6 +280,24 @@ template<typename MatrixType> void cholesky_indefinite(const MatrixType& m)
VERIFY(!ldlt.isNegative());
VERIFY(!ldlt.isPositive());
}
{
mat << 0, 0, 0, 0;
LDLT<MatrixType> ldlt(mat);
VERIFY(ldlt.isNegative());
VERIFY(ldlt.isPositive());
}
{
mat << 0, 0, 0, 1;
LDLT<MatrixType> ldlt(mat);
VERIFY(!ldlt.isNegative());
VERIFY(ldlt.isPositive());
}
{
mat << -1, 0, 0, 0;
LDLT<MatrixType> ldlt(mat);
VERIFY(ldlt.isNegative());
VERIFY(!ldlt.isPositive());
}
}
template<typename MatrixType> void cholesky_verify_assert()
@ -309,7 +327,7 @@ void test_cholesky()
CALL_SUBTEST_1( cholesky(Matrix<double,1,1>()) );
CALL_SUBTEST_3( cholesky(Matrix2d()) );
CALL_SUBTEST_3( cholesky_bug241(Matrix2d()) );
CALL_SUBTEST_3( cholesky_indefinite(Matrix2d()) );
CALL_SUBTEST_3( cholesky_definiteness(Matrix2d()) );
CALL_SUBTEST_4( cholesky(Matrix3f()) );
CALL_SUBTEST_5( cholesky(Matrix4d()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);


@ -279,6 +279,13 @@ template<typename Scalar, int Mode, int Options> void transformations()
t1 = Eigen::Scaling(s0,s0,s0) * t1;
VERIFY_IS_APPROX(t0.matrix(), t1.matrix());
t0 = t3;
t0.scale(s0);
t1 = t3 * Eigen::Scaling(s0);
VERIFY_IS_APPROX(t0.matrix(), t1.matrix());
t0.prescale(s0);
t1 = Eigen::Scaling(s0) * t1;
VERIFY_IS_APPROX(t0.matrix(), t1.matrix());
t0.setIdentity();
t0.prerotate(q1).prescale(v0).pretranslate(v0);


@ -54,6 +54,8 @@ template<typename MatrixType> void qr_invertible()
{
using std::log;
using std::abs;
using std::pow;
using std::max;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Scalar Scalar;
@ -65,7 +67,7 @@ template<typename MatrixType> void qr_invertible()
if (internal::is_same<RealScalar,float>::value)
{
// let's build a matrix more stable to inverse
MatrixType a = MatrixType::Random(size,size*2);
MatrixType a = MatrixType::Random(size,size*4);
m1 += a * a.adjoint();
}
@ -81,8 +83,11 @@ template<typename MatrixType> void qr_invertible()
m3 = qr.householderQ(); // get a unitary
m1 = m3 * m1 * m3;
qr.compute(m1);
VERIFY_IS_APPROX(absdet, qr.absDeterminant());
VERIFY_IS_APPROX(log(absdet), qr.logAbsDeterminant());
// This test is tricky if the determinant becomes too small.
// Since we generate random numbers with magnitude in the range [0,1], the average determinant is 0.5^size
VERIFY_IS_MUCH_SMALLER_THAN( abs(absdet-qr.absDeterminant()), (max)(RealScalar(pow(0.5,size)),(max)(abs(absdet),abs(qr.absDeterminant()))) );
}
template<typename MatrixType> void qr_verify_assert()

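The old absolute comparison was fragile because the determinant of a random matrix with entries of magnitude in [0,1] shrinks roughly like 0.5^size, so absdet quickly falls below anything float arithmetic can compare meaningfully; the new bound is relative to max(0.5^size, |absdet|, |qr.absDeterminant()|). A sketch of the scales involved:

#include <cmath>
#include <iostream>
#include <limits>

int main()
{
  for (int size = 1; size <= 64; size *= 4)
    std::cout << size << ": 0.5^size = " << std::pow(0.5, size) << "\n";
  // prints 0.5, 0.0625, 1.5e-05, 5.4e-20 -- the last already far below float epsilon
  std::cout << "float epsilon: " << std::numeric_limits<float>::epsilon() << "\n";
}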

@ -154,16 +154,16 @@ initSparse(double density,
sparseMat.finalize();
}
template<typename Scalar> void
template<typename Scalar,int Options,typename Index> void
initSparse(double density,
Matrix<Scalar,Dynamic,1>& refVec,
SparseVector<Scalar>& sparseVec,
SparseVector<Scalar,Options,Index>& sparseVec,
std::vector<int>* zeroCoords = 0,
std::vector<int>* nonzeroCoords = 0)
{
sparseVec.reserve(int(refVec.size()*density));
sparseVec.setZero();
for(int i=0; i<refVec.size(); i++)
for(Index i=0; i<refVec.size(); i++)
{
Scalar v = (internal::random<double>(0,1) < density) ? internal::random<Scalar>() : Scalar(0);
if (v!=Scalar(0))
@ -178,10 +178,10 @@ initSparse(double density,
}
}
template<typename Scalar> void
template<typename Scalar,int Options,typename Index> void
initSparse(double density,
Matrix<Scalar,1,Dynamic>& refVec,
SparseVector<Scalar,RowMajor>& sparseVec,
SparseVector<Scalar,Options,Index>& sparseVec,
std::vector<int>* zeroCoords = 0,
std::vector<int>* nonzeroCoords = 0)
{


@ -270,6 +270,14 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.row(0).dot(refM2.row(0)));
else
VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.col(0).dot(refM2.row(0)));
DenseVector rv = DenseVector::Random(m1.cols());
DenseVector cv = DenseVector::Random(m1.rows());
Index r = internal::random<Index>(0,m1.rows()-2);
Index c = internal::random<Index>(0,m1.cols()-1);
VERIFY_IS_APPROX(( m1.template block<1,Dynamic>(r,0,1,m1.cols()).dot(rv)) , refM1.row(r).dot(rv));
VERIFY_IS_APPROX(m1.row(r).dot(rv), refM1.row(r).dot(rv));
VERIFY_IS_APPROX(m1.col(c).dot(cv), refM1.col(c).dot(cv));
VERIFY_IS_APPROX(m1.conjugate(), refM1.conjugate());
VERIFY_IS_APPROX(m1.real(), refM1.real());


@ -13,8 +13,9 @@ template<typename SparseMatrixType, typename DenseMatrix, bool IsRowMajor=Sparse
template<typename SparseMatrixType, typename DenseMatrix> struct test_outer<SparseMatrixType,DenseMatrix,false> {
static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) {
int c = internal::random(0,m2.cols()-1);
int c1 = internal::random(0,m2.cols()-1);
typedef typename SparseMatrixType::Index Index;
Index c = internal::random<Index>(0,m2.cols()-1);
Index c1 = internal::random<Index>(0,m2.cols()-1);
VERIFY_IS_APPROX(m4=m2.col(c)*refMat2.col(c1).transpose(), refMat4=refMat2.col(c)*refMat2.col(c1).transpose());
VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.col(c).transpose(), refMat4=refMat2.col(c1)*refMat2.col(c).transpose());
}
@ -22,8 +23,9 @@ template<typename SparseMatrixType, typename DenseMatrix> struct test_outer<Spar
template<typename SparseMatrixType, typename DenseMatrix> struct test_outer<SparseMatrixType,DenseMatrix,true> {
static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) {
int r = internal::random(0,m2.rows()-1);
int c1 = internal::random(0,m2.cols()-1);
typedef typename SparseMatrixType::Index Index;
Index r = internal::random<Index>(0,m2.rows()-1);
Index c1 = internal::random<Index>(0,m2.cols()-1);
VERIFY_IS_APPROX(m4=m2.row(r).transpose()*refMat2.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*refMat2.col(c1).transpose());
VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.row(r), refMat4=refMat2.col(c1)*refMat2.row(r));
}
@ -37,9 +39,9 @@ template<typename SparseMatrixType> void sparse_product()
{
typedef typename SparseMatrixType::Index Index;
Index n = 100;
const Index rows = internal::random<int>(1,n);
const Index cols = internal::random<int>(1,n);
const Index depth = internal::random<int>(1,n);
const Index rows = internal::random<Index>(1,n);
const Index cols = internal::random<Index>(1,n);
const Index depth = internal::random<Index>(1,n);
typedef typename SparseMatrixType::Scalar Scalar;
enum { Flags = SparseMatrixType::Flags };
@ -244,6 +246,7 @@ void test_sparse_product()
CALL_SUBTEST_1( (sparse_product<SparseMatrix<double,RowMajor> >()) );
CALL_SUBTEST_2( (sparse_product<SparseMatrix<std::complex<double>, ColMajor > >()) );
CALL_SUBTEST_2( (sparse_product<SparseMatrix<std::complex<double>, RowMajor > >()) );
CALL_SUBTEST_3( (sparse_product<SparseMatrix<float,ColMajor,long int> >()) );
CALL_SUBTEST_4( (sparse_product_regression_test<SparseMatrix<double,RowMajor>, Matrix<double, Dynamic, Dynamic, RowMajor> >()) );
}
}


@ -9,14 +9,14 @@
#include "sparse.h"
template<typename Scalar> void sparse_vector(int rows, int cols)
template<typename Scalar,typename Index> void sparse_vector(int rows, int cols)
{
double densityMat = (std::max)(8./(rows*cols), 0.01);
double densityVec = (std::max)(8./float(rows), 0.1);
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
typedef SparseVector<Scalar> SparseVectorType;
typedef SparseMatrix<Scalar> SparseMatrixType;
typedef SparseVector<Scalar,0,Index> SparseVectorType;
typedef SparseMatrix<Scalar,0,Index> SparseMatrixType;
Scalar eps = 1e-6;
SparseMatrixType m1(rows,rows);
@ -101,9 +101,10 @@ template<typename Scalar> void sparse_vector(int rows, int cols)
void test_sparse_vector()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( sparse_vector<double>(8, 8) );
CALL_SUBTEST_2( sparse_vector<std::complex<double> >(16, 16) );
CALL_SUBTEST_1( sparse_vector<double>(299, 535) );
CALL_SUBTEST_1(( sparse_vector<double,int>(8, 8) ));
CALL_SUBTEST_2(( sparse_vector<std::complex<double>, int>(16, 16) ));
CALL_SUBTEST_1(( sparse_vector<double,long int>(299, 535) ));
CALL_SUBTEST_1(( sparse_vector<double,short>(299, 535) ));
}
}


@ -55,8 +55,16 @@ template<typename MatrixType> void stable_norm(const MatrixType& m)
Index rows = m.rows();
Index cols = m.cols();
Scalar big = internal::random<Scalar>() * ((std::numeric_limits<RealScalar>::max)() * RealScalar(1e-4));
Scalar small = internal::random<Scalar>() * ((std::numeric_limits<RealScalar>::min)() * RealScalar(1e4));
// get a non-zero random factor
Scalar factor = internal::random<Scalar>();
while(numext::abs2(factor)<RealScalar(1e-4))
factor = internal::random<Scalar>();
Scalar big = factor * ((std::numeric_limits<RealScalar>::max)() * RealScalar(1e-4));
factor = internal::random<Scalar>();
while(numext::abs2(factor)<RealScalar(1e-4))
factor = internal::random<Scalar>();
Scalar small = factor * ((std::numeric_limits<RealScalar>::min)() * RealScalar(1e4));
MatrixType vzero = MatrixType::Zero(rows, cols),
vrand = MatrixType::Random(rows, cols),
@ -91,7 +99,7 @@ template<typename MatrixType> void stable_norm(const MatrixType& m)
VERIFY_IS_APPROX(vsmall.blueNorm(), sqrt(size)*abs(small));
VERIFY_IS_APPROX(vsmall.hypotNorm(), sqrt(size)*abs(small));
// Test compilation of cwise() version
VERIFY_IS_APPROX(vrand.colwise().stableNorm(), vrand.colwise().norm());
VERIFY_IS_APPROX(vrand.colwise().blueNorm(), vrand.colwise().norm());
VERIFY_IS_APPROX(vrand.colwise().hypotNorm(), vrand.colwise().norm());


@ -130,10 +130,11 @@ void run_fixed_size_test(int num_elements)
// MUST be positive because in any other case det(cR_t) may become negative for
// odd dimensions!
const Scalar c = abs(internal::random<Scalar>());
// Also, if c is too small compared to t.norm(), the problem is ill-posed (cf. Bug 744)
const Scalar c = internal::random<Scalar>(0.5, 2.0);
FixedMatrix R = randMatrixSpecialUnitary<Scalar>(dim);
FixedVector t = Scalar(50)*FixedVector::Random(dim,1);
FixedVector t = Scalar(32)*FixedVector::Random(dim,1);
HomMatrix cR_t = HomMatrix::Identity(dim+1,dim+1);
cR_t.block(0,0,dim,dim) = c*R;
@ -149,9 +150,9 @@ void run_fixed_size_test(int num_elements)
HomMatrix cR_t_umeyama = umeyama(src_block, dst_block);
const Scalar error = ( cR_t_umeyama*src - dst ).array().square().sum();
const Scalar error = ( cR_t_umeyama*src - dst ).squaredNorm();
VERIFY(error < Scalar(10)*std::numeric_limits<Scalar>::epsilon());
VERIFY(error < Scalar(16)*std::numeric_limits<Scalar>::epsilon());
}
void test_umeyama()


@ -11,7 +11,12 @@
#define EIGEN_OPENGL_MODULE
#include <Eigen/Geometry>
#include <GL/gl.h>
#if defined(__APPLE_CC__)
#include <OpenGL/gl.h>
#else
#include <GL/gl.h>
#endif
namespace Eigen {


@ -153,7 +153,22 @@ void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
Bc = m_B.cols();
dst.resize(this->rows(), this->cols());
dst.resizeNonZeros(0);
dst.reserve(m_A.nonZeros() * m_B.nonZeros());
// compute the number of non-zeros per inner vector of dst
{
VectorXi nnzA = VectorXi::Zero(Dest::IsRowMajor ? m_A.rows() : m_A.cols());
for (Index kA=0; kA < m_A.outerSize(); ++kA)
for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA)
nnzA(Dest::IsRowMajor ? itA.row() : itA.col())++;
VectorXi nnzB = VectorXi::Zero(Dest::IsRowMajor ? m_B.rows() : m_B.cols());
for (Index kB=0; kB < m_B.outerSize(); ++kB)
for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB)
nnzB(Dest::IsRowMajor ? itB.row() : itB.col())++;
Matrix<int,Dynamic,Dynamic,ColMajor> nnzAB = nnzB * nnzA.transpose();
dst.reserve(VectorXi::Map(nnzAB.data(), nnzAB.size()));
}
for (Index kA=0; kA < m_A.outerSize(); ++kA)
{

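Reserving per inner vector is exact here: column jA*Bc + jB of kron(A,B) holds nnzA(jA) * nnzB(jB) non-zeros, and the column-major flattening of nnzAB = nnzB * nnzA.transpose() enumerates the columns in precisely that order. A sketch of the bookkeeping:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXi nnzA(2), nnzB(3);
  nnzA << 2, 1;      // non-zeros per column of A
  nnzB << 1, 0, 3;   // non-zeros per column of B
  Eigen::MatrixXi nnzAB = nnzB * nnzA.transpose();  // nnzAB(jB,jA) = nnzB(jB)*nnzA(jA)
  // Flattened column-major, entry k = jB + B.cols()*jA matches the Kronecker
  // column index jA*Bc + jB.
  std::cout << Eigen::VectorXi::Map(nnzAB.data(), nnzAB.size()).transpose() << "\n";
  // prints: 2 0 6 1 0 3
}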

@ -31,6 +31,8 @@ namespace Eigen
enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ };
enum { DerivativeMemoryLayout = Dimension==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ };
/** \brief The data type used to store non-zero basis functions. */
typedef Array<Scalar,1,OrderAtCompileTime> BasisVectorType;
@ -39,7 +41,7 @@ namespace Eigen
typedef Array<Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;
/** \brief The data type used to store the spline's derivative values. */
typedef Array<Scalar,Dimension,Dynamic,ColMajor,Dimension,NumOfDerivativesAtCompileTime> DerivativeType;
typedef Array<Scalar,Dimension,Dynamic,DerivativeMemoryLayout,Dimension,NumOfDerivativesAtCompileTime> DerivativeType;
/** \brief The point type the spline is representing. */
typedef Array<Scalar,Dimension,1> PointType;
@ -62,12 +64,14 @@ namespace Eigen
{
enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ };
enum { DerivativeMemoryLayout = _Dim==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ };
/** \brief The data type used to store the values of the basis function derivatives. */
typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;
/** \brief The data type used to store the spline's derivative values. */
typedef Array<_Scalar,_Dim,Dynamic,ColMajor,_Dim,NumOfDerivativesAtCompileTime> DerivativeType;
typedef Array<_Scalar,_Dim,Dynamic,DerivativeMemoryLayout,_Dim,NumOfDerivativesAtCompileTime> DerivativeType;
};
/** \brief 2D float B-spline with dynamic degree. */

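DerivativeMemoryLayout exists because Eigen rejects a column-major array with a single row at compile time, so the derivative storage of a one-dimensional spline must switch to RowMajor. A sketch of the underlying constraint:

#include <Eigen/Core>

int main()
{
  // Fine: a one-row dynamic-width array declared RowMajor.
  Eigen::Array<float, 1, Eigen::Dynamic, Eigen::RowMajor> ok(1, 4);
  // The ColMajor spelling of the same type does not compile: it trips
  // Eigen's INVALID_MATRIX_TEMPLATE_PARAMETERS static assertion.
  return ok.cols() == 4 ? 0 : 1;
}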

@ -16,9 +16,6 @@ std::complex<T> RandomCpx() { return std::complex<T>( (T)(rand()/(T)RAND_MAX - .
using namespace std;
using namespace Eigen;
float norm(float x) {return x*x;}
double norm(double x) {return x*x;}
long double norm(long double x) {return x*x;}
template < typename T>
complex<long double> promote(complex<T> x) { return complex<long double>(x.real(),x.imag()); }
@ -40,11 +37,11 @@ complex<long double> promote(long double x) { return complex<long double>( x);
for (size_t k1=0;k1<(size_t)timebuf.size();++k1) {
acc += promote( timebuf[k1] ) * exp( complex<long double>(0,k1*phinc) );
}
totalpower += norm(acc);
totalpower += numext::abs2(acc);
complex<long double> x = promote(fftbuf[k0]);
complex<long double> dif = acc - x;
difpower += norm(dif);
//cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(norm(dif)) << endl;
difpower += numext::abs2(dif);
//cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(numext::abs2(dif)) << endl;
}
cerr << "rmse:" << sqrt(difpower/totalpower) << endl;
return sqrt(difpower/totalpower);
@ -57,8 +54,8 @@ complex<long double> promote(long double x) { return complex<long double>( x);
long double difpower=0;
size_t n = (min)( buf1.size(),buf2.size() );
for (size_t k=0;k<n;++k) {
totalpower += (norm( buf1[k] ) + norm(buf2[k]) )/2.;
difpower += norm(buf1[k] - buf2[k]);
totalpower += (numext::abs2( buf1[k] ) + numext::abs2(buf2[k]) )/2.;
difpower += numext::abs2(buf1[k] - buf2[k]);
}
return sqrt(difpower/totalpower);
}


@ -183,4 +183,38 @@ void test_kronecker_product()
DM_b2.resize(4,8);
DM_ab2 = kroneckerProduct(DM_a2,DM_b2);
CALL_SUBTEST(check_dimension(DM_ab2,10*4,9*8));
for(int i = 0; i < g_repeat; i++)
{
double density = Eigen::internal::random<double>(0.01,0.5);
int ra = Eigen::internal::random<int>(1,50);
int ca = Eigen::internal::random<int>(1,50);
int rb = Eigen::internal::random<int>(1,50);
int cb = Eigen::internal::random<int>(1,50);
SparseMatrix<float,ColMajor> sA(ra,ca), sB(rb,cb), sC;
SparseMatrix<float,RowMajor> sC2;
MatrixXf dA(ra,ca), dB(rb,cb), dC;
initSparse(density, dA, sA);
initSparse(density, dB, sB);
sC = kroneckerProduct(sA,sB);
dC = kroneckerProduct(dA,dB);
VERIFY_IS_APPROX(MatrixXf(sC),dC);
sC = kroneckerProduct(sA.transpose(),sB);
dC = kroneckerProduct(dA.transpose(),dB);
VERIFY_IS_APPROX(MatrixXf(sC),dC);
sC = kroneckerProduct(sA.transpose(),sB.transpose());
dC = kroneckerProduct(dA.transpose(),dB.transpose());
VERIFY_IS_APPROX(MatrixXf(sC),dC);
sC = kroneckerProduct(sA,sB.transpose());
dC = kroneckerProduct(dA,dB.transpose());
VERIFY_IS_APPROX(MatrixXf(sC),dC);
sC2 = kroneckerProduct(sA,sB);
dC = kroneckerProduct(dA,dB);
VERIFY_IS_APPROX(MatrixXf(sC2),dC);
}
}


@ -106,7 +106,6 @@ void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const
typedef typename POLYNOMIAL::Scalar Scalar;
typedef typename REAL_ROOTS::Scalar Real;
typedef PolynomialSolver<Scalar, Deg > PolynomialSolverType;
//Test realRoots
std::vector< Real > calc_realRoots;
@ -120,7 +119,7 @@ void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const
bool found = false;
for( size_t j=0; j<calc_realRoots.size()&& !found; ++j )
{
if( internal::isApprox( calc_realRoots[i], real_roots[j] ), psPrec ){
if( internal::isApprox( calc_realRoots[i], real_roots[j], psPrec ) ){
found = true; }
}
VERIFY( found );
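The final hunk fixes a misplaced parenthesis: psPrec sat outside the isApprox call, so the comma operator made the if() test psPrec itself (always true) while isApprox silently ran with its default precision — the loop marked every root as found. The trap in isolation:

#include <iostream>

int main()
{
  double psPrec = 1e-6;
  bool approx = false;        // stand-in for internal::isApprox(a, b)
  if ((approx, psPrec))       // comma operator: the condition is psPrec != 0
    std::cout << "always taken, whatever approx is\n";
  if (approx)
    std::cout << "never taken\n";
}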