Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-03-13 18:37:27 +08:00)

Commit a325d1cb1e: merge with default branch
@@ -108,7 +108,8 @@ endif()
 set(EIGEN_TEST_MAX_SIZE "320" CACHE STRING "Maximal matrix/vector size, default is 320")

 macro(ei_add_cxx_compiler_flag FLAG)
-  string(REGEX REPLACE "-" "" SFLAG ${FLAG})
+  string(REGEX REPLACE "-" "" SFLAG1 ${FLAG})
+  string(REGEX REPLACE "\\+" "p" SFLAG ${SFLAG1})
   check_cxx_compiler_flag(${FLAG} COMPILER_SUPPORT_${SFLAG})
   if(COMPILER_SUPPORT_${SFLAG})
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}")
@@ -142,6 +143,9 @@ if(NOT MSVC)
   ei_add_cxx_compiler_flag("-Wpointer-arith")
   ei_add_cxx_compiler_flag("-Wwrite-strings")
   ei_add_cxx_compiler_flag("-Wformat-security")
+  ei_add_cxx_compiler_flag("-Wshorten-64-to-32")
+  ei_add_cxx_compiler_flag("-Wenum-conversion")
+  ei_add_cxx_compiler_flag("-Wc++11-extensions")

   ei_add_cxx_compiler_flag("-Wno-psabi")
   ei_add_cxx_compiler_flag("-Wno-variadic-macros")
@@ -153,6 +157,7 @@ if(NOT MSVC)
   ei_add_cxx_compiler_flag("-wd981") # disable ICC's "operands are evaluated in unspecified order" remark
+  ei_add_cxx_compiler_flag("-wd2304") # disbale ICC's "warning #2304: non-explicit constructor with single argument may cause implicit type conversion" produced by -Wnon-virtual-dtor


   # The -ansi flag must be added last, otherwise it is also used as a linker flag by check_cxx_compiler_flag making it fails
   # Moreover we should not set both -strict-ansi and -ansi
   check_cxx_compiler_flag("-strict-ansi" COMPILER_SUPPORT_STRICTANSI)
@@ -264,6 +264,7 @@ template<> struct ldlt_inplace<Lower>
   typedef typename MatrixType::Scalar Scalar;
   typedef typename MatrixType::RealScalar RealScalar;
   typedef typename MatrixType::Index Index;
+  typedef typename TranspositionType::StorageIndexType IndexType;
   eigen_assert(mat.rows()==mat.cols());
   const Index size = mat.rows();

@@ -283,7 +284,7 @@ template<> struct ldlt_inplace<Lower>
   mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
   index_of_biggest_in_corner += k;

-  transpositions.coeffRef(k) = index_of_biggest_in_corner;
+  transpositions.coeffRef(k) = IndexType(index_of_biggest_in_corner);
   if(k != index_of_biggest_in_corner)
   {
     // apply the transposition while taking care to consider only
@@ -292,7 +293,7 @@ template<> struct ldlt_inplace<Lower>
   mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k));
   mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s));
   std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner));
-  for(int i=k+1;i<index_of_biggest_in_corner;++i)
+  for(Index i=k+1;i<index_of_biggest_in_corner;++i)
   {
     Scalar tmp = mat.coeffRef(i,k);
     mat.coeffRef(i,k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner,i));
@@ -460,6 +461,7 @@ template<typename MatrixType, int _UpLo>
 template<typename Derived>
 LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
 {
+  typedef typename TranspositionType::StorageIndexType IndexType;
   const Index size = w.rows();
   if (m_isInitialized)
   {
@@ -471,7 +473,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
     m_matrix.setZero();
     m_transpositions.resize(size);
     for (Index i = 0; i < size; i++)
-      m_transpositions.coeffRef(i) = i;
+      m_transpositions.coeffRef(i) = IndexType(i);
     m_temporary.resize(size);
     m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
     m_isInitialized = true;
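Not part of the commit: a minimal usage sketch of the rankUpdate() path touched above, assuming a recent Eigen with the dense Cholesky module. It factorizes a symmetric matrix and then applies a rank-one update in place.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Build a symmetric positive semi-definite matrix.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
  A = (A * A.transpose()).eval();

  Eigen::LDLT<Eigen::MatrixXd> ldlt(A);     // initial factorization

  // In-place update: afterwards ldlt factors A + 1.0 * w * w^T.
  Eigen::VectorXd w = Eigen::VectorXd::Random(4);
  ldlt.rankUpdate(w, 1.0);

  Eigen::VectorXd b = Eigen::VectorXd::Random(4);
  std::cout << ldlt.solve(b).transpose() << std::endl;
  return 0;
}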
@@ -68,11 +68,11 @@ class PermutationBase : public EigenBase<Derived>
   MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
   MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
 };
- typedef typename Traits::Scalar Scalar;
+ typedef typename Traits::StorageIndexType StorageIndexType;
  typedef typename Traits::Index Index;
- typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>
+ typedef Matrix<StorageIndexType,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>
  DenseMatrixType;
- typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,Index>
+ typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,StorageIndexType>
  PlainPermutationType;
  using Base::derived;
 #endif
@@ -149,7 +149,7 @@ class PermutationBase : public EigenBase<Derived>
  /** Sets *this to be the identity permutation matrix */
  void setIdentity()
  {
-  for(Index i = 0; i < size(); ++i)
+  for(StorageIndexType i = 0; i < size(); ++i)
    indices().coeffRef(i) = i;
  }

@@ -175,8 +175,8 @@ class PermutationBase : public EigenBase<Derived>
  eigen_assert(i>=0 && j>=0 && i<size() && j<size());
  for(Index k = 0; k < size(); ++k)
  {
-  if(indices().coeff(k) == i) indices().coeffRef(k) = j;
-  else if(indices().coeff(k) == j) indices().coeffRef(k) = i;
+  if(indices().coeff(k) == i) indices().coeffRef(k) = StorageIndexType(j);
+  else if(indices().coeff(k) == j) indices().coeffRef(k) = StorageIndexType(i);
  }
  return derived();
 }
@@ -264,7 +264,7 @@ class PermutationBase : public EigenBase<Derived>
 *
 * \param SizeAtCompileTime the number of rows/cols, or Dynamic
 * \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
-* \param IndexType the interger type of the indices
+* \param StorageIndexType the interger type of the indices
 *
 * This class represents a permutation matrix, internally stored as a vector of integers.
 *
@@ -272,17 +272,18 @@ class PermutationBase : public EigenBase<Derived>
 */

 namespace internal {
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
-struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
- : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType>
+struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType> >
+ : traits<Matrix<_StorageIndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
 {
- typedef IndexType Index;
- typedef Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+ typedef Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+ typedef typename IndicesType::Index Index;
+ typedef _StorageIndexType StorageIndexType;
 };
 }

-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
-class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType>
+class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType> >
 {
  typedef PermutationBase<PermutationMatrix> Base;
  typedef internal::traits<PermutationMatrix> Traits;
@@ -294,6 +295,8 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile

 #ifndef EIGEN_PARSED_BY_DOXYGEN
  typedef typename Traits::IndicesType IndicesType;
+ typedef typename Traits::StorageIndexType StorageIndexType;
+ typedef typename Traits::Index Index;
 #endif

 inline PermutationMatrix()
@@ -301,7 +304,7 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile

  /** Constructs an uninitialized permutation matrix of given size.
   */
- inline PermutationMatrix(int size) : m_indices(size)
+ inline PermutationMatrix(Index size) : m_indices(size)
  {}

  /** Copy constructor. */
@@ -390,18 +393,19 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile


 namespace internal {
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
-struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
- : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType, int _PacketAccess>
+struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType>,_PacketAccess> >
+ : traits<Matrix<_StorageIndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
 {
- typedef IndexType Index;
- typedef Map<const Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;
+ typedef Map<const Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;
+ typedef typename IndicesType::Index Index;
+ typedef _StorageIndexType StorageIndexType;
 };
 }

-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
-class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess>
- : public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType, int _PacketAccess>
+class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType>,_PacketAccess>
+ : public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType>,_PacketAccess> >
 {
  typedef PermutationBase<Map> Base;
  typedef internal::traits<Map> Traits;
@@ -409,14 +413,15 @@ class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,

 #ifndef EIGEN_PARSED_BY_DOXYGEN
  typedef typename Traits::IndicesType IndicesType;
- typedef typename IndicesType::Scalar Index;
+ typedef typename IndicesType::Scalar StorageIndexType;
+ typedef typename IndicesType::Index Index;
 #endif

- inline Map(const Index* indicesPtr)
+ inline Map(const StorageIndexType* indicesPtr)
   : m_indices(indicesPtr)
  {}

- inline Map(const Index* indicesPtr, Index size)
+ inline Map(const StorageIndexType* indicesPtr, Index size)
   : m_indices(indicesPtr,size)
  {}

@@ -472,7 +477,8 @@ struct traits<PermutationWrapper<_IndicesType> >
 {
  typedef PermutationStorage StorageKind;
  typedef typename _IndicesType::Scalar Scalar;
- typedef typename _IndicesType::Scalar Index;
+ typedef typename _IndicesType::Scalar StorageIndexType;
+ typedef typename _IndicesType::Index Index;
  typedef _IndicesType IndicesType;
  enum {
   RowsAtCompileTime = _IndicesType::SizeAtCompileTime,
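For context (not from the diff): after this change the third template parameter of PermutationMatrix is the storage type of the index vector, while Index itself is taken from the indices matrix. A hedged usage sketch with 32-bit storage indices:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Permutation whose indices are stored as plain int.
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P(3);
  P.setIdentity();
  P.applyTranspositionOnTheRight(0, 2);   // swap entries 0 and 2

  Eigen::Matrix3d M = Eigen::Matrix3d::Identity();
  std::cout << (M * P) << std::endl;      // columns of M permuted
  return 0;
}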
@@ -53,7 +53,8 @@ class TranspositionsBase
 public:

  typedef typename Traits::IndicesType IndicesType;
- typedef typename IndicesType::Scalar Index;
+ typedef typename IndicesType::Scalar StorageIndexType;
+ typedef typename IndicesType::Index Index;

  Derived& derived() { return *static_cast<Derived*>(this); }
  const Derived& derived() const { return *static_cast<const Derived*>(this); }
@@ -81,17 +82,17 @@ class TranspositionsBase
  inline Index size() const { return indices().size(); }

  /** Direct access to the underlying index vector */
- inline const Index& coeff(Index i) const { return indices().coeff(i); }
+ inline const StorageIndexType& coeff(Index i) const { return indices().coeff(i); }
  /** Direct access to the underlying index vector */
- inline Index& coeffRef(Index i) { return indices().coeffRef(i); }
+ inline StorageIndexType& coeffRef(Index i) { return indices().coeffRef(i); }
  /** Direct access to the underlying index vector */
- inline const Index& operator()(Index i) const { return indices()(i); }
+ inline const StorageIndexType& operator()(Index i) const { return indices()(i); }
  /** Direct access to the underlying index vector */
- inline Index& operator()(Index i) { return indices()(i); }
+ inline StorageIndexType& operator()(Index i) { return indices()(i); }
  /** Direct access to the underlying index vector */
- inline const Index& operator[](Index i) const { return indices()(i); }
+ inline const StorageIndexType& operator[](Index i) const { return indices()(i); }
  /** Direct access to the underlying index vector */
- inline Index& operator[](Index i) { return indices()(i); }
+ inline StorageIndexType& operator[](Index i) { return indices()(i); }

  /** const version of indices(). */
  const IndicesType& indices() const { return derived().indices(); }
@@ -99,7 +100,7 @@ class TranspositionsBase
  IndicesType& indices() { return derived().indices(); }

  /** Resizes to given size. */
- inline void resize(int newSize)
+ inline void resize(Index newSize)
  {
   indices().resize(newSize);
  }
@@ -107,7 +108,7 @@ class TranspositionsBase
  /** Sets \c *this to represents an identity transformation */
  void setIdentity()
  {
-  for(int i = 0; i < indices().size(); ++i)
+  for(StorageIndexType i = 0; i < indices().size(); ++i)
    coeffRef(i) = i;
  }

@@ -144,23 +145,26 @@ class TranspositionsBase
 };

 namespace internal {
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
-struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType>
+struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType> >
 {
- typedef IndexType Index;
- typedef Matrix<Index, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+ typedef Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+ typedef typename IndicesType::Index Index;
+ typedef _StorageIndexType StorageIndexType;
 };
 }

-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
-class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType>
+class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType> >
 {
  typedef internal::traits<Transpositions> Traits;
 public:

  typedef TranspositionsBase<Transpositions> Base;
  typedef typename Traits::IndicesType IndicesType;
- typedef typename IndicesType::Scalar Index;
+ typedef typename IndicesType::Scalar StorageIndexType;
+ typedef typename IndicesType::Index Index;


  inline Transpositions() {}

@@ -215,30 +219,32 @@ class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTim


 namespace internal {
-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
-struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,_PacketAccess> >
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType, int _PacketAccess>
+struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType>,_PacketAccess> >
 {
- typedef IndexType Index;
- typedef Map<const Matrix<Index,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
+ typedef Map<const Matrix<_StorageIndexType,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
+ typedef typename IndicesType::Index Index;
+ typedef _StorageIndexType StorageIndexType;
 };
 }

-template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int PacketAccess>
-class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess>
- : public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess> >
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType, int PacketAccess>
+class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType>,PacketAccess>
+ : public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType>,PacketAccess> >
 {
  typedef internal::traits<Map> Traits;
 public:

  typedef TranspositionsBase<Map> Base;
  typedef typename Traits::IndicesType IndicesType;
- typedef typename IndicesType::Scalar Index;
+ typedef typename IndicesType::Scalar StorageIndexType;
+ typedef typename IndicesType::Index Index;

- inline Map(const Index* indicesPtr)
+ inline Map(const StorageIndexType* indicesPtr)
   : m_indices(indicesPtr)
  {}

- inline Map(const Index* indicesPtr, Index size)
+ inline Map(const StorageIndexType* indicesPtr, Index size)
   : m_indices(indicesPtr,size)
  {}

@@ -275,7 +281,8 @@ namespace internal {
 template<typename _IndicesType>
 struct traits<TranspositionsWrapper<_IndicesType> >
 {
- typedef typename _IndicesType::Scalar Index;
+ typedef typename _IndicesType::Scalar StorageIndexType;
+ typedef typename _IndicesType::Index Index;
  typedef _IndicesType IndicesType;
 };
 }
@@ -289,7 +296,8 @@ class TranspositionsWrapper

  typedef TranspositionsBase<TranspositionsWrapper> Base;
  typedef typename Traits::IndicesType IndicesType;
- typedef typename IndicesType::Scalar Index;
+ typedef typename IndicesType::Scalar StorageIndexType;
+ typedef typename IndicesType::Index Index;

  inline TranspositionsWrapper(IndicesType& a_indices)
   : m_indices(a_indices)
@@ -363,24 +371,25 @@ struct transposition_matrix_product_retval
 {
  typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
- typedef typename TranspositionType::Index Index;
+ typedef typename TranspositionType::StorageIndexType StorageIndexType;

  transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix)
   : m_transpositions(tr), m_matrix(matrix)
  {}

- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }

  template<typename Dest> inline void evalTo(Dest& dst) const
  {
-  const int size = m_transpositions.size();
-  Index j = 0;
+  const Index size = m_transpositions.size();
+  StorageIndexType j = 0;

   if(!(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix)))
    dst = m_matrix;

-  for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)
-   if((j=m_transpositions.coeff(k))!=k)
+  for(Index k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)
+   if(Index(j=m_transpositions.coeff(k))!=k)
   {
    if(Side==OnTheLeft)
     dst.row(k).swap(dst.row(j));
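For context (not from the diff): a Transpositions object stores a sequence of pairwise swaps, and the StorageIndexType introduced above is the scalar type of that index vector. A short, hedged usage sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Transpositions<Eigen::Dynamic> T(3);
  T.setIdentity();
  T[1] = 2;                                 // when applied, swap rows 1 and 2

  Eigen::Matrix3d M = Eigen::Matrix3d::Identity();
  std::cout << (T * M) << std::endl;        // M with rows 1 and 2 exchanged
  return 0;
}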
@@ -1,7 +1,7 @@
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra.
 //
-// Copyright (C) 2008 Konstantinos Margaritis <markos@codex.gr>
+// Copyright (C) 2008-2014 Konstantinos Margaritis <markos@freevec.org>
 //
 // This Source Code Form is subject to the terms of the Mozilla
 // Public License v. 2.0. If a copy of the MPL was not distributed
@@ -62,17 +62,17 @@ typedef __vector unsigned char Packet16uc;
 // Define global static constants:
 static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 };
 static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 };
-static Packet16uc p16uc_REVERSE = {12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3};
-static Packet16uc p16uc_FORWARD = vec_lvsl(0, (float*)0);
-static Packet16uc p16uc_DUPLICATE = {0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7};
+static Packet16uc p16uc_REVERSE = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3};
+static Packet16uc p16uc_FORWARD = vec_lvsl(0, (float*)0); //{ 0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15}
+static Packet16uc p16uc_DUPLICATE = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7};

-static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0);
-static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0);
-static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1);
-static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16);
-static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1);
-static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0);
-static Packet4f p4f_ZERO_ = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1);
+static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); //{ 0.0, 0.0, 0.0, 0.0}
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,}
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1); //{ 1, 1, 1, 1}
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16); //{ -16, -16, -16, -16}
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); //{ -1, -1, -1, -1}
+static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0); //{ 1.0, 1.0, 1.0, 1.0}
+static Packet4f p4f_ZERO_ = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1); //{ 0x80000000, 0x80000000, 0x80000000, 0x80000000}

 template<> struct packet_traits<float> : default_packet_traits
 {
@@ -82,8 +82,10 @@ template<> struct packet_traits<float> : default_packet_traits
   Vectorizable = 1,
   AlignedOnScalar = 1,
   size=4,
+  HasHalfPacket=0,

+  // FIXME check the Has*
   HasDiv = 1,
   HasSin = 0,
   HasCos = 0,
   HasLog = 0,
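Not from the patch: packet_traits<T> is internal, unsupported API, but inspecting it shows what a SIMD backend such as this AltiVec one advertises to the vectorizer. A hedged sketch (the member names are assumed to exist in the Eigen version at hand):

#include <Eigen/Core>
#include <iostream>

int main()
{
  typedef Eigen::internal::packet_traits<float> traits;
  std::cout << "packet size:  " << traits::size << "\n"
            << "vectorizable: " << int(traits::Vectorizable) << "\n"
            << "has division: " << int(traits::HasDiv) << std::endl;
  return 0;
}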
@@ -82,22 +82,31 @@ template<typename T> struct add_const_on_value_type<T const* const> { typedef T


 template<typename From, typename To>
-struct is_convertible
+struct is_convertible_impl
 {
 private:
+ struct any_conversion
+ {
+  template <typename T> any_conversion(const volatile T&);
+  template <typename T> any_conversion(T&);
+ };
  struct yes {int a[1];};
  struct no {int a[2];};

- template<typename T>
- static yes test (const T&) {}

- template<typename> static no test (...) {}
+ static yes test(const To&, int);
+ static no test(any_conversion, ...);

 public:
  static From ms_from;
- enum { value = sizeof(test<To>(ms_from))==sizeof(yes) };
+ enum { value = sizeof(test(ms_from, 0))==sizeof(yes) };
 };

+template<typename From, typename To>
+struct is_convertible
+{
+ enum { value = is_convertible_impl<typename remove_all<From>::type,
+                                    typename remove_all<To >::type>::value };
+};

 /** \internal Allows to enable/disable an overload
  * according to a compile time condition.
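A standalone sketch of the detection idiom adopted above (the name my_is_convertible is made up for illustration): overload resolution between test(const To&, int) and test(any_conversion, ...), inside an unevaluated sizeof, decides at compile time whether From converts to To.

#include <iostream>

template<typename From, typename To>
struct my_is_convertible
{
private:
  // Catch-all type: anything binds to it, but only as a worse match.
  struct any_conversion
  {
    template<typename T> any_conversion(const volatile T&);
    template<typename T> any_conversion(T&);
  };
  struct yes { char a[1]; };
  struct no  { char a[2]; };

  static yes test(const To&, int);        // preferred when From -> To exists
  static no  test(any_conversion, ...);   // fallback otherwise

  static From ms_from;                    // never defined: only used inside sizeof
public:
  enum { value = sizeof(test(ms_from, 0)) == sizeof(yes) };
};

int main()
{
  std::cout << my_is_convertible<int, double>::value << "\n";       // 1
  std::cout << my_is_convertible<int*, double>::value << std::endl; // 0
  return 0;
}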
@@ -427,13 +427,13 @@ struct partial_lu_impl
 /** \internal performs the LU decomposition with partial pivoting in-place.
  */
 template<typename MatrixType, typename TranspositionType>
-void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::Index& nb_transpositions)
+void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndexType& nb_transpositions)
 {
  eigen_assert(lu.cols() == row_transpositions.size());
  eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);

  partial_lu_impl
-  <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::Index>
+  <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndexType>
   ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
 }

@@ -452,7 +452,7 @@ PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& ma

  m_rowsTranspositions.resize(size);

- typename TranspositionType::Index nb_transpositions;
+ typename TranspositionType::StorageIndexType nb_transpositions;
  internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
  m_det_p = (nb_transpositions%2) ? -1 : 1;
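Not from the patch: the nb_transpositions counter whose type changes above records how many row swaps the factorization performed, and its parity fixes the sign of the determinant. A small usage sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d A;
  A << 2, 1, 1,
       4, 3, 3,
       8, 7, 9;

  Eigen::PartialPivLU<Eigen::Matrix3d> lu(A);
  std::cout << "det via LU: " << lu.determinant() << "\n"
            << "det direct: " << A.determinant() << std::endl;
  return 0;
}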
@@ -375,17 +375,19 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
   Scalar z;
   JacobiRotation<Scalar> rot;
   RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));

   if(n==0)
   {
    z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
    work_matrix.row(p) *= z;
    if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
    if(work_matrix.coeff(q,q)!=Scalar(0))
+   {
     z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
-   else
-    z = Scalar(0);
-   work_matrix.row(q) *= z;
-   if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+    work_matrix.row(q) *= z;
+    if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+   }
+   // otherwise the second row is already zero, so we have nothing to do.
   }
   else
   {
@@ -852,7 +854,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
   if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
  }

- // Scaling factor to reducover/under-flows
+ // Scaling factor to reduce over/under-flows
  RealScalar scale = m_workMatrix.cwiseAbs().maxCoeff();
  if(scale==RealScalar(0)) scale = RealScalar(1);
  m_workMatrix /= scale;
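Not from the patch: a hedged usage sketch of JacobiSVD on a rank-deficient matrix, the situation in which the zero-diagonal branch patched above matters.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A(3,3);
  A << 1, 2, 3,
       2, 4, 6,   // second row is twice the first: rank deficient
       0, 0, 0;

  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  std::cout << "singular values: " << svd.singularValues().transpose() << "\n";

  Eigen::Vector3d b(1.0, 2.0, 0.0);
  std::cout << "least-squares x: " << svd.solve(b).transpose() << std::endl;
  return 0;
}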
@@ -51,20 +51,20 @@ template<typename OtherDerived>
 inline Derived& SparseMatrixBase<Derived>::assign(const OtherDerived& other)
 {
  const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+ const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? Index(other.rows()) : Index(other.cols());
  if ((!transpose) && other.isRValue())
  {
   // eval without temporary
-  derived().resize(other.rows(), other.cols());
+  derived().resize(Index(other.rows()), Index(other.cols()));
   derived().setZero();
   derived().reserve((std::max)(this->rows(),this->cols())*2);
   for (Index j=0; j<outerSize; ++j)
   {
    derived().startVec(j);
-   for (typename OtherDerived::InnerIterator it(other, j); it; ++it)
+   for (typename OtherDerived::InnerIterator it(other, typename OtherDerived::Index(j)); it; ++it)
    {
     Scalar v = it.value();
-    derived().insertBackByOuterInner(j,it.index()) = v;
+    derived().insertBackByOuterInner(j,Index(it.index())) = v;
    }
   }
   derived().finalize();
@@ -87,19 +87,19 @@ inline void SparseMatrixBase<Derived>::assignGeneric(const OtherDerived& other)

  enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };

- const Index outerSize = other.outerSize();
+ const Index outerSize = Index(other.outerSize());
  //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
  // thanks to shallow copies, we always eval to a tempary
- Derived temp(other.rows(), other.cols());
+ Derived temp(Index(other.rows()), Index(other.cols()));

  temp.reserve((std::max)(this->rows(),this->cols())*2);
  for (Index j=0; j<outerSize; ++j)
  {
   temp.startVec(j);
-  for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+  for (typename OtherDerived::InnerIterator it(other.derived(), typename OtherDerived::Index(j)); it; ++it)
   {
    Scalar v = it.value();
-   temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+   temp.insertBackByOuterInner(Flip?Index(it.index()):j,Flip?j:Index(it.index())) = v;
   }
  }
  temp.finalize();
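Not from the patch: the assign()/assignGeneric() code above is what runs when a sparse expression is copied into a sparse matrix, for example when the storage orders differ. A minimal sketch:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double, Eigen::ColMajor> A(3,3);
  A.insert(0,0) = 1.0;
  A.insert(2,1) = 2.0;
  A.makeCompressed();

  // Copying into a row-major matrix goes through the generic assignment path,
  // which iterates over the inner vectors of the source expression.
  Eigen::SparseMatrix<double, Eigen::RowMajor> B = A;
  std::cout << B << std::endl;
  return 0;
}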
@@ -65,7 +65,6 @@ class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
  : public internal::sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs,typename CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator>
 {
  public:
-  typedef typename Lhs::Index Index;
   typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
    BinaryOp,Lhs,Rhs, InnerIterator> Base;

@@ -91,11 +90,11 @@ class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived
 {
  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
  typedef typename traits<CwiseBinaryXpr>::Scalar Scalar;
- typedef typename traits<CwiseBinaryXpr>::Index Index;
  typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
  typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
  typedef typename _LhsNested::InnerIterator LhsIterator;
  typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;

 public:

@@ -157,11 +156,11 @@ class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs,
  typedef scalar_product_op<T> BinaryFunc;
  typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
  typedef typename CwiseBinaryXpr::Scalar Scalar;
- typedef typename CwiseBinaryXpr::Index Index;
  typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
  typedef typename _LhsNested::InnerIterator LhsIterator;
  typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
  typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
 public:

 EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
@@ -211,15 +210,15 @@ class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs,
  typedef scalar_product_op<T> BinaryFunc;
  typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
  typedef typename CwiseBinaryXpr::Scalar Scalar;
- typedef typename CwiseBinaryXpr::Index Index;
  typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
  typedef typename traits<CwiseBinaryXpr>::RhsNested RhsNested;
  typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename Lhs::Index Index;
  enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
 public:

 EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
-  : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+  : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),typename _LhsNested::Index(outer)), m_functor(xpr.functor()), m_outer(outer)
 {}

 EIGEN_STRONG_INLINE Derived& operator++()
@@ -252,9 +251,9 @@ class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs,
  typedef scalar_product_op<T> BinaryFunc;
  typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
  typedef typename CwiseBinaryXpr::Scalar Scalar;
- typedef typename CwiseBinaryXpr::Index Index;
  typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
  typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;

  enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
 public:
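Not from the patch: the iterator selectors above drive coefficient-wise binary expressions on sparse operands. A short sketch of the two cases they cover, sparse + sparse and sparse.cwiseProduct(dense):

#include <Eigen/Sparse>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3,3), B(3,3);
  A.insert(0,0) = 1.0;  A.insert(1,2) = 2.0;
  B.insert(0,0) = 5.0;  B.insert(2,1) = 3.0;

  Eigen::SparseMatrix<double> C = A + B;              // sparse + sparse
  Eigen::MatrixXd D = Eigen::MatrixXd::Constant(3,3, 2.0);
  Eigen::SparseMatrix<double> E = A.cwiseProduct(D);  // sparse .* dense
  std::cout << C << "\n" << E << std::endl;
  return 0;
}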
@@ -248,8 +248,8 @@ class SparseDenseOuterProduct
   EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
  }

- EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
- EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
+ EIGEN_STRONG_INLINE Index rows() const { return Tr ? Index(m_rhs.rows()) : m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : Index(m_rhs.cols()); }

  EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
  EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
@@ -266,31 +266,33 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes
  typedef typename SparseDenseOuterProduct::Index Index;
 public:
  EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
-  : Base(prod.lhs(), 0), m_outer(outer), m_factor(get(prod.rhs(), outer, typename internal::traits<Rhs>::StorageKind() ))
-  { }
+  : Base(prod.lhs(), 0), m_outer(outer), m_empty(false), m_factor(get(prod.rhs(), outer, typename internal::traits<Rhs>::StorageKind() ))
+  {}

  inline Index outer() const { return m_outer; }
  inline Index row() const { return Transpose ? m_outer : Base::index(); }
  inline Index col() const { return Transpose ? Base::index() : m_outer; }

  inline Scalar value() const { return Base::value() * m_factor; }
+ inline operator bool() const { return Base::operator bool() && !m_empty; }

 protected:
- static Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense())
+ Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense()) const
  {
   return rhs.coeff(outer);
  }

- static Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse())
+ Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse())
  {
   typename Traits::_RhsNested::InnerIterator it(rhs, outer);
-  if (it && it.index()==0)
+  if (it && it.index()==0 && it.value()!=Scalar(0))
    return it.value();

+  m_empty = true;
   return Scalar(0);
  }

  Index m_outer;
+ bool m_empty;
  Scalar m_factor;
 };
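Not from the patch: the expression patched above is produced by a sparse column times a dense row vector, as exercised in the sparse_product unit test; the new m_empty flag keeps an explicit zero from being inserted when the sparse column has no nonzero entry. A sketch:

#include <Eigen/Sparse>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> S(4,4);
  S.insert(1,0) = 3.0;                       // single nonzero in column 0

  Eigen::VectorXd d(3);
  d << 1.0, 0.0, 2.0;

  // Sparse (4x1) * dense (1x3) outer product, evaluated into a sparse matrix.
  Eigen::SparseMatrix<double> P = S.col(0) * d.transpose();
  std::cout << "nonzeros: " << P.nonZeros() << "\n" << P << std::endl;
  return 0;
}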
@@ -34,8 +34,10 @@ struct traits<SparseDiagonalProduct<Lhs, Rhs> >
  typedef typename remove_all<Lhs>::type _Lhs;
  typedef typename remove_all<Rhs>::type _Rhs;
  typedef typename _Lhs::Scalar Scalar;
- typedef typename promote_index_type<typename traits<Lhs>::Index,
-                                     typename traits<Rhs>::Index>::type Index;
+ // propagate the index type of the sparse matrix
+ typedef typename conditional< is_diagonal<_Lhs>::ret,
+                               typename traits<Rhs>::Index,
+                               typename traits<Lhs>::Index>::type Index;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
@@ -92,8 +94,8 @@ class SparseDiagonalProduct
   eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
  }

- EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+ EIGEN_STRONG_INLINE Index rows() const { return Index(m_lhs.rows()); }
+ EIGEN_STRONG_INLINE Index cols() const { return Index(m_rhs.cols()); }

  EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
  EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
@@ -116,7 +118,7 @@ class sparse_diagonal_product_inner_iterator_selector
  : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
 {
  typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
- typedef typename Lhs::Index Index;
+ typedef typename Rhs::Index Index;
 public:
  inline sparse_diagonal_product_inner_iterator_selector(
   const SparseDiagonalProductType& expr, Index outer)
@@ -136,7 +138,7 @@ class sparse_diagonal_product_inner_iterator_selector
   scalar_product_op<typename Lhs::Scalar>,
   const typename Rhs::ConstInnerVectorReturnType,
   const typename Lhs::DiagonalVectorType>::InnerIterator Base;
- typedef typename Lhs::Index Index;
+ typedef typename Rhs::Index Index;
  Index m_outer;
 public:
  inline sparse_diagonal_product_inner_iterator_selector(
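Not from the patch: a sparse-times-diagonal product, which is the expression whose Index type the traits change above now takes from the sparse operand rather than promoting both.

#include <Eigen/Sparse>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.insert(0,0) = 1.0;
  A.insert(2,1) = 4.0;

  Eigen::Vector3d w(1.0, 2.0, 3.0);

  // Scales column j of A by w(j).
  Eigen::SparseMatrix<double> B = A * w.asDiagonal();
  std::cout << B << std::endl;
  return 0;
}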
@@ -808,7 +808,9 @@ protected:
  template<typename Other>
  void initAssignment(const Other& other)
  {
-  resize(other.rows(), other.cols());
+  eigen_assert( other.rows() == typename Other::Index(Index(other.rows()))
+             && other.cols() == typename Other::Index(Index(other.cols())) );
+  resize(Index(other.rows()), Index(other.cols()));
   if(m_innerNonZeros)
   {
    std::free(m_innerNonZeros);
@@ -948,7 +950,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::Index Index;
- SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
+ SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
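Not from the patch: set_from_triplets() above builds the matrix through a temporary with the opposite storage order, and the change makes that temporary use the same Index type as the destination. The public entry point is setFromTriplets():

#include <Eigen/Sparse>
#include <vector>
#include <iostream>

int main()
{
  std::vector<Eigen::Triplet<double> > triplets;
  triplets.push_back(Eigen::Triplet<double>(0, 0, 1.0));
  triplets.push_back(Eigen::Triplet<double>(2, 1, 4.0));
  triplets.push_back(Eigen::Triplet<double>(2, 1, 1.0));  // duplicates are summed

  Eigen::SparseMatrix<double> A(3,3);
  A.setFromTriplets(triplets.begin(), triplets.end());
  std::cout << A << std::endl;
  return 0;
}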
@@ -366,7 +366,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
 {
  dst.setZero();
  for (Index j=0; j<outerSize(); ++j)
-  for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+  for (typename Derived::InnerIterator i(derived(),typename Derived::Index(j)); i; ++i)
    dst.coeffRef(i.row(),i.col()) = i.value();
 }
 #endif
@@ -246,7 +246,7 @@ class SparseSelfAdjointTimeDenseProduct
   || ( (UpLo&Lower) && LhsIsRowMajor),
   ProcessSecondHalf = !ProcessFirstHalf
  };
- for (Index j=0; j<m_lhs.outerSize(); ++j)
+ for (typename _Lhs::Index j=0; j<m_lhs.outerSize(); ++j)
 {
  LhsInnerIterator i(m_lhs,j);
  if (ProcessSecondHalf)
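Not from the patch: the loop patched above implements selfadjointView() * dense, reading only one triangular half of the sparse matrix. A sketch:

#include <Eigen/Sparse>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Store only the lower triangle of a symmetric matrix.
  Eigen::SparseMatrix<double> L(3,3);
  L.insert(0,0) = 2.0;
  L.insert(1,0) = 1.0;
  L.insert(2,2) = 5.0;

  Eigen::Vector3d x(1.0, 1.0, 1.0);
  Eigen::Vector3d y = L.selfadjointView<Eigen::Lower>() * x;  // full symmetric matrix is used
  std::cout << y.transpose() << std::endl;
  return 0;
}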
@@ -67,6 +67,7 @@ template<typename MatrixType, int QRPreconditioner>
 void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions)
 {
  typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
  typedef typename MatrixType::Index Index;
  Index rows = m.rows();
  Index cols = m.cols();
@@ -81,9 +82,37 @@ void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions)

  RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols));
  JacobiSVD<MatrixType, QRPreconditioner> svd(m, computationOptions);

+ if(internal::is_same<RealScalar,double>::value) svd.setThreshold(1e-8);
+ else if(internal::is_same<RealScalar,float>::value) svd.setThreshold(1e-4);

  SolutionType x = svd.solve(rhs);

+ RealScalar residual = (m*x-rhs).norm();
+ // Check that there is no significantly better solution in the neighborhood of x
+ if(!test_isMuchSmallerThan(residual,rhs.norm()))
+ {
+  // If the residual is very small, then we have an exact solution, so we are already good.
+  for(int k=0;k<x.rows();++k)
+  {
+   SolutionType y(x);
+   y.row(k).array() += 2*NumTraits<RealScalar>::epsilon();
+   RealScalar residual_y = (m*y-rhs).norm();
+   VERIFY( test_isApprox(residual_y,residual) || residual < residual_y );
+
+   y.row(k) = x.row(k).array() - 2*NumTraits<RealScalar>::epsilon();
+   residual_y = (m*y-rhs).norm();
+   VERIFY( test_isApprox(residual_y,residual) || residual < residual_y );
+  }
+ }
+
  // evaluate normal equation which works also for least-squares solutions
- VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs);
+ if(internal::is_same<RealScalar,double>::value)
+ {
+  // This test is not stable with single precision.
+  // This is probably because squaring m signicantly affects the precision.
+  VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs);
+ }

  // check minimal norm solutions
  {
@@ -139,10 +168,9 @@ void jacobisvd_test_all_computation_options(const MatrixType& m)
  if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
   return;
  JacobiSVD<MatrixType, QRPreconditioner> fullSvd(m, ComputeFullU|ComputeFullV);

- jacobisvd_check_full(m, fullSvd);
- jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeFullV);
+ CALL_SUBTEST(( jacobisvd_check_full(m, fullSvd) ));
+ CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeFullV) ));

 #if defined __INTEL_COMPILER
 // remark #111: statement is unreachable
 #pragma warning disable 111
@@ -150,20 +178,20 @@ void jacobisvd_test_all_computation_options(const MatrixType& m)
  if(QRPreconditioner == FullPivHouseholderQRPreconditioner)
   return;

- jacobisvd_compare_to_full(m, ComputeFullU, fullSvd);
- jacobisvd_compare_to_full(m, ComputeFullV, fullSvd);
- jacobisvd_compare_to_full(m, 0, fullSvd);
+ CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU, fullSvd) ));
+ CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullV, fullSvd) ));
+ CALL_SUBTEST(( jacobisvd_compare_to_full(m, 0, fullSvd) ));

  if (MatrixType::ColsAtCompileTime == Dynamic) {
   // thin U/V are only available with dynamic number of columns
-  jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd);
-  jacobisvd_compare_to_full(m, ComputeThinV, fullSvd);
-  jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd);
-  jacobisvd_compare_to_full(m, ComputeThinU , fullSvd);
-  jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd);
-  jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeThinV);
-  jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeFullV);
-  jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeThinV);
+  CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd) ));
+  CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinV, fullSvd) ));
+  CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd) ));
+  CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU , fullSvd) ));
+  CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd) ));
+  CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeThinV) ));
+  CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeFullV) ));
+  CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeThinV) ));

  // test reconstruction
  typedef typename MatrixType::Index Index;
@@ -176,12 +204,29 @@ void jacobisvd_test_all_computation_options(const MatrixType& m)
 template<typename MatrixType>
 void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
 {
- MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
+ MatrixType m = a;
+ if(pickrandom)
+ {
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef typename MatrixType::Index Index;
+  Index diagSize = (std::min)(a.rows(), a.cols());
+  RealScalar s = std::numeric_limits<RealScalar>::max_exponent10/4;
+  s = internal::random<RealScalar>(1,s);
+  Matrix<RealScalar,Dynamic,1> d = Matrix<RealScalar,Dynamic,1>::Random(diagSize);
+  for(Index k=0; k<diagSize; ++k)
+   d(k) = d(k)*std::pow(RealScalar(10),internal::random<RealScalar>(-s,s));
+  m = Matrix<Scalar,Dynamic,Dynamic>::Random(a.rows(),diagSize) * d.asDiagonal() * Matrix<Scalar,Dynamic,Dynamic>::Random(diagSize,a.cols());
+  // cancel some coeffs
+  Index n = internal::random<Index>(0,m.size()-1);
+  for(Index i=0; i<n; ++i)
+   m(internal::random<Index>(0,m.rows()-1), internal::random<Index>(0,m.cols()-1)) = Scalar(0);
+ }

- jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m);
- jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m);
- jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m);
- jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m);
+ CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m) ));
+ CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m) ));
+ CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m) ));
+ CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m) ));
 }

 template<typename MatrixType> void jacobisvd_verify_assert(const MatrixType& m)
@@ -384,6 +429,7 @@ void test_jacobisvd()
  TEST_SET_BUT_UNUSED_VARIABLE(r)
  TEST_SET_BUT_UNUSED_VARIABLE(c)

+ CALL_SUBTEST_10(( jacobisvd<MatrixXd>(MatrixXd(r,c)) ));
  CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(r,c)) ));
  CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(r,c)) ));
  (void) r;
@@ -9,6 +9,12 @@

 #include "main.h"

+template<typename From, typename To>
+bool check_is_convertible(const From&, const To&)
+{
+ return internal::is_convertible<From,To>::value;
+}
+
 void test_meta()
 {
  VERIFY((internal::conditional<(3<4),internal::true_type, internal::false_type>::type::value));
@@ -52,6 +58,24 @@ void test_meta()
  VERIFY(( internal::is_same<const float,internal::remove_pointer<const float*>::type >::value));
  VERIFY(( internal::is_same<float,internal::remove_pointer<float* const >::type >::value));

+ VERIFY(( internal::is_convertible<float,double>::value ));
+ VERIFY(( internal::is_convertible<int,double>::value ));
+ VERIFY(( internal::is_convertible<double,int>::value ));
+ VERIFY((!internal::is_convertible<std::complex<double>,double>::value ));
+ VERIFY(( internal::is_convertible<Array33f,Matrix3f>::value ));
+// VERIFY((!internal::is_convertible<Matrix3f,Matrix3d>::value )); //does not work because the conversion is prevented by a static assertion
+ VERIFY((!internal::is_convertible<Array33f,int>::value ));
+ VERIFY((!internal::is_convertible<MatrixXf,float>::value ));
+ {
+  float f;
+  MatrixXf A, B;
+  VectorXf a, b;
+  VERIFY(( check_is_convertible(a.dot(b), f) ));
+  VERIFY(( check_is_convertible(a.transpose()*b, f) ));
+  VERIFY((!check_is_convertible(A*B, f) ));
+  VERIFY(( check_is_convertible(A*B, A) ));
+ }
+
 VERIFY(internal::meta_sqrt<1>::ret == 1);
 #define VERIFY_META_SQRT(X) VERIFY(internal::meta_sqrt<X>::ret == int(std::sqrt(double(X))))
 VERIFY_META_SQRT(2);
@@ -61,4 +61,13 @@ void test_product_large()
   VERIFY_IS_APPROX(r2, (mat1.row(2)*mat2).eval());
  }
 #endif
+
+ // Regression test for bug 714:
+#ifdef EIGEN_HAS_OPENMP
+ std::cout << "Testing omp_set_dynamic(1)\n";
+ omp_set_dynamic(1);
+ for(int i = 0; i < g_repeat; i++) {
+  CALL_SUBTEST_6( product(Matrix<float,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ }
+#endif
 }
@@ -84,10 +84,18 @@ void test_product_trsolve()
  CALL_SUBTEST_4((trsolve<std::complex<double>,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2),internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))));

  // vectors
  CALL_SUBTEST_1((trsolve<float,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))));
- CALL_SUBTEST_5((trsolve<std::complex<double>,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))));
- CALL_SUBTEST_6((trsolve<float,1,1>()));
- CALL_SUBTEST_7((trsolve<float,1,2>()));
- CALL_SUBTEST_8((trsolve<std::complex<float>,4,1>()));
+ CALL_SUBTEST_5((trsolve<float,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))));
+ CALL_SUBTEST_6((trsolve<double,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))));
+ CALL_SUBTEST_7((trsolve<std::complex<float>,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))));
+ CALL_SUBTEST_8((trsolve<std::complex<double>,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))));
+
+ // meta-unrollers
+ CALL_SUBTEST_9((trsolve<float,4,1>()));
+ CALL_SUBTEST_10((trsolve<double,4,1>()));
+ CALL_SUBTEST_11((trsolve<std::complex<float>,4,1>()));
+ CALL_SUBTEST_12((trsolve<float,1,1>()));
+ CALL_SUBTEST_13((trsolve<float,1,2>()));
+ CALL_SUBTEST_14((trsolve<float,3,1>()));

 }
 }
@@ -108,28 +108,39 @@ template<typename SparseMatrixType> void sparse_product()
  Index r = internal::random<Index>(0,rows-1);
  Index c1 = internal::random<Index>(0,cols-1);
  Index r1 = internal::random<Index>(0,depth-1);
+ DenseMatrix dm5 = DenseMatrix::Random(depth, cols);

- VERIFY_IS_APPROX( m4=m2.col(c)*refMat3.col(c1).transpose(), refMat4=refMat2.col(c)*refMat3.col(c1).transpose());
- VERIFY_IS_APPROX( m4=m2.middleCols(c,1)*refMat3.col(c1).transpose(), refMat4=refMat2.col(c)*refMat3.col(c1).transpose());
- VERIFY_IS_APPROX(dm4=m2.col(c)*refMat3.col(c1).transpose(), refMat4=refMat2.col(c)*refMat3.col(c1).transpose());
+ VERIFY_IS_APPROX( m4=m2.col(c)*dm5.col(c1).transpose(), refMat4=refMat2.col(c)*dm5.col(c1).transpose());
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX( m4=m2.middleCols(c,1)*dm5.col(c1).transpose(), refMat4=refMat2.col(c)*dm5.col(c1).transpose());
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX(dm4=m2.col(c)*dm5.col(c1).transpose(), refMat4=refMat2.col(c)*dm5.col(c1).transpose());

- VERIFY_IS_APPROX(m4=refMat3.col(c1)*m2.col(c).transpose(), refMat4=refMat3.col(c1)*refMat2.col(c).transpose());
- VERIFY_IS_APPROX(m4=refMat3.col(c1)*m2.middleCols(c,1).transpose(), refMat4=refMat3.col(c1)*refMat2.col(c).transpose());
- VERIFY_IS_APPROX(dm4=refMat3.col(c1)*m2.col(c).transpose(), refMat4=refMat3.col(c1)*refMat2.col(c).transpose());
+ VERIFY_IS_APPROX(m4=dm5.col(c1)*m2.col(c).transpose(), refMat4=dm5.col(c1)*refMat2.col(c).transpose());
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX(m4=dm5.col(c1)*m2.middleCols(c,1).transpose(), refMat4=dm5.col(c1)*refMat2.col(c).transpose());
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX(dm4=dm5.col(c1)*m2.col(c).transpose(), refMat4=dm5.col(c1)*refMat2.col(c).transpose());

- VERIFY_IS_APPROX( m4=refMat3.row(r1).transpose()*m2.col(c).transpose(), refMat4=refMat3.row(r1).transpose()*refMat2.col(c).transpose());
- VERIFY_IS_APPROX(dm4=refMat3.row(r1).transpose()*m2.col(c).transpose(), refMat4=refMat3.row(r1).transpose()*refMat2.col(c).transpose());
+ VERIFY_IS_APPROX( m4=dm5.row(r1).transpose()*m2.col(c).transpose(), refMat4=dm5.row(r1).transpose()*refMat2.col(c).transpose());
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX(dm4=dm5.row(r1).transpose()*m2.col(c).transpose(), refMat4=dm5.row(r1).transpose()*refMat2.col(c).transpose());

- VERIFY_IS_APPROX( m4=m2.row(r).transpose()*refMat3.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*refMat3.col(c1).transpose());
- VERIFY_IS_APPROX( m4=m2.middleRows(r,1).transpose()*refMat3.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*refMat3.col(c1).transpose());
- VERIFY_IS_APPROX(dm4=m2.row(r).transpose()*refMat3.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*refMat3.col(c1).transpose());
+ VERIFY_IS_APPROX( m4=m2.row(r).transpose()*dm5.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*dm5.col(c1).transpose());
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX( m4=m2.middleRows(r,1).transpose()*dm5.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*dm5.col(c1).transpose());
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX(dm4=m2.row(r).transpose()*dm5.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*dm5.col(c1).transpose());

- VERIFY_IS_APPROX( m4=refMat3.col(c1)*m2.row(r), refMat4=refMat3.col(c1)*refMat2.row(r));
- VERIFY_IS_APPROX( m4=refMat3.col(c1)*m2.middleRows(r,1), refMat4=refMat3.col(c1)*refMat2.row(r));
- VERIFY_IS_APPROX(dm4=refMat3.col(c1)*m2.row(r), refMat4=refMat3.col(c1)*refMat2.row(r));
+ VERIFY_IS_APPROX( m4=dm5.col(c1)*m2.row(r), refMat4=dm5.col(c1)*refMat2.row(r));
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX( m4=dm5.col(c1)*m2.middleRows(r,1), refMat4=dm5.col(c1)*refMat2.row(r));
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX(dm4=dm5.col(c1)*m2.row(r), refMat4=dm5.col(c1)*refMat2.row(r));

- VERIFY_IS_APPROX( m4=refMat3.row(r1).transpose()*m2.row(r), refMat4=refMat3.row(r1).transpose()*refMat2.row(r));
- VERIFY_IS_APPROX(dm4=refMat3.row(r1).transpose()*m2.row(r), refMat4=refMat3.row(r1).transpose()*refMat2.row(r));
+ VERIFY_IS_APPROX( m4=dm5.row(r1).transpose()*m2.row(r), refMat4=dm5.row(r1).transpose()*refMat2.row(r));
+ VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
+ VERIFY_IS_APPROX(dm4=dm5.row(r1).transpose()*m2.row(r), refMat4=dm5.row(r1).transpose()*refMat2.row(r));
 }

 VERIFY_IS_APPROX(m6=m6*m6, refMat6=refMat6*refMat6);
@@ -27,6 +27,8 @@ namespace Eigen {
  * via the <a href="http://www.holoborodko.com/pavel/mpfr">MPFR C++</a>
  * library which itself is built upon <a href="http://www.mpfr.org/">MPFR</a>/<a href="http://gmplib.org/">GMP</a>.
  *
+ * \warning MPFR C++ is licensed under the GPL.
+ *
  * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder.
  *
  * Here is an example:
@@ -139,64 +141,50 @@ int main()
   public:
    typedef mpfr::mpreal ResScalar;
    enum {
-    nr = 2, // must be 2 for proper packing...
+    nr = 1,
     mr = 1,
     WorkSpaceFactor = nr,
     LhsProgress = 1,
     RhsProgress = 1
   };
  };

- template<typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
- struct gebp_kernel<mpfr::mpreal,mpfr::mpreal,Index,mr,nr,ConjugateLhs,ConjugateRhs>
+ template<typename Index, bool ConjugateLhs, bool ConjugateRhs>
+ struct gebp_kernel<mpfr::mpreal,mpfr::mpreal,Index,1,1,ConjugateLhs,ConjugateRhs>
 {
  typedef mpfr::mpreal mpreal;

  EIGEN_DONT_INLINE
  void operator()(mpreal* res, Index resStride, const mpreal* blockA, const mpreal* blockB, Index rows, Index depth, Index cols, mpreal alpha,
-  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, mpreal* /*unpackedB*/ = 0)
+  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0)
  {
-  mpreal acc1, acc2, tmp;
+  mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())),
+         tmp (0,mpfr_get_prec(blockA[0].mpfr_srcptr()));

   if(strideA==-1) strideA = depth;
   if(strideB==-1) strideB = depth;

-  for(Index j=0; j<cols; j+=nr)
+  for(Index i=0; i<rows; ++i)
   {
-   Index actual_nr = (std::min<Index>)(nr,cols-j);
-   mpreal *C1 = res + j*resStride;
-   mpreal *C2 = res + (j+1)*resStride;
-   for(Index i=0; i<rows; i++)
+   for(Index j=0; j<cols; ++j)
    {
-    mpreal *B = const_cast<mpreal*>(blockB) + j*strideB + offsetB*actual_nr;
-    mpreal *A = const_cast<mpreal*>(blockA) + i*strideA + offsetA;
+    mpreal *C1 = res + j*resStride;
+
+    const mpreal *A = blockA + i*strideA + offsetA;
+    const mpreal *B = blockB + j*strideB + offsetB;

     acc1 = 0;
-    acc2 = 0;
     for(Index k=0; k<depth; k++)
     {
-     mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_ptr(), B[0].mpfr_ptr(), mpreal::get_default_rnd());
+     mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_srcptr(), B[k].mpfr_srcptr(), mpreal::get_default_rnd());
      mpfr_add(acc1.mpfr_ptr(), acc1.mpfr_ptr(), tmp.mpfr_ptr(), mpreal::get_default_rnd());

-     if(actual_nr==2) {
-      mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_ptr(), B[1].mpfr_ptr(), mpreal::get_default_rnd());
-      mpfr_add(acc2.mpfr_ptr(), acc2.mpfr_ptr(), tmp.mpfr_ptr(), mpreal::get_default_rnd());
-     }
-
-     B+=actual_nr;
     }

-    mpfr_mul(acc1.mpfr_ptr(), acc1.mpfr_ptr(), alpha.mpfr_ptr(), mpreal::get_default_rnd());
-    mpfr_add(C1[i].mpfr_ptr(), C1[i].mpfr_ptr(), acc1.mpfr_ptr(), mpreal::get_default_rnd());
-
-    if(actual_nr==2) {
-     mpfr_mul(acc2.mpfr_ptr(), acc2.mpfr_ptr(), alpha.mpfr_ptr(), mpreal::get_default_rnd());
-     mpfr_add(C2[i].mpfr_ptr(), C2[i].mpfr_ptr(), acc2.mpfr_ptr(), mpreal::get_default_rnd());
-    }
+    mpfr_mul(acc1.mpfr_ptr(), acc1.mpfr_srcptr(), alpha.mpfr_srcptr(), mpreal::get_default_rnd());
+    mpfr_add(C1[i].mpfr_ptr(), C1[i].mpfr_srcptr(), acc1.mpfr_srcptr(), mpreal::get_default_rnd());
    }
   }
  }
 };

 } // end namespace internal
 }
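Not from the patch, and assuming MPFR and MPFR C++ are installed: the specialized gebp_kernel above is what a plain matrix product ends up calling when the scalar type is mpfr::mpreal. A hedged usage sketch along the lines of the module's documented example:

#include <unsupported/Eigen/MPRealSupport>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  using mpfr::mpreal;
  mpreal::set_default_prec(256);   // 256-bit mantissa

  typedef Eigen::Matrix<mpreal, Eigen::Dynamic, Eigen::Dynamic> MatrixXmp;
  MatrixXmp A = MatrixXmp::Random(10,10);
  MatrixXmp B = MatrixXmp::Random(10,10);

  MatrixXmp C = A * B;             // multi-precision product via the kernel above
  std::cout << C(0,0) << std::endl;
  return 0;
}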
[One file's diff is suppressed because it is too large.]
@@ -32,12 +32,11 @@ void test_mpreal_support()
  VERIFY_IS_APPROX(A.array().abs2().sqrt(), A.array().abs());
  VERIFY_IS_APPROX(A.array().sin(), sin(A.array()));
  VERIFY_IS_APPROX(A.array().cos(), cos(A.array()));

  // Cholesky
  X = S.selfadjointView<Lower>().llt().solve(B);
  VERIFY_IS_APPROX((S.selfadjointView<Lower>()*X).eval(),B);

  // partial LU
  X = A.lu().solve(B);
  VERIFY_IS_APPROX((A*X).eval(),B);