bug #877, bug #572: Introduce a global Index typedef. Rename Sparse*::Index to StorageIndex, make Dense*::StorageIndex an alias to DenseIndex. Overall this commit gets rid of all Index conversion warnings.
This commit is contained in:
parent 6ccf97f3e6
commit e8cdbedefb
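For orientation, a minimal sketch of the scheme this commit introduces. The names Eigen::Index, StorageIndex and internal::convert_index come from the hunks below; the simplified definitions and the sketch namespace are illustrative assumptions, not Eigen's actual code:

#include <cstddef>
#include <cassert>

namespace sketch {

// One global, signed type for all interface-level indices (rows(), cols(),
// loop counters). In Eigen this is Eigen::Index, configurable via
// EIGEN_DEFAULT_DENSE_INDEX_TYPE.
typedef std::ptrdiff_t Index;

// Sparse objects keep a per-object type for their stored index arrays
// (often int, to halve the memory footprint); dense objects alias their
// StorageIndex to the dense index type.
template<typename Scalar, typename StorageIndex_ = int>
struct SparseMatrixLike {
  typedef StorageIndex_ StorageIndex;
  StorageIndex* innerIndices; // stored with the narrow type
};

// Every remaining narrowing conversion between the two worlds goes through
// a single helper, so it is explicit and produces no compiler warnings.
template<typename IndexDest, typename IndexSrc>
IndexDest convert_index(const IndexSrc& idx) {
  assert(idx == IndexSrc(IndexDest(idx)) && "index value out of range");
  return IndexDest(idx);
}

} // namespace sketch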
@@ -60,6 +60,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
 typedef typename MatrixType::Scalar Scalar;
 typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
 typedef typename MatrixType::Index Index;
+typedef typename MatrixType::StorageIndex StorageIndex;
 typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;

 typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
@@ -60,6 +60,7 @@ template<typename _MatrixType, int _UpLo> class LLT
 typedef typename MatrixType::Scalar Scalar;
 typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
 typedef typename MatrixType::Index Index;
+typedef typename MatrixType::StorageIndex StorageIndex;

 enum {
 PacketSize = internal::packet_traits<Scalar>::size,
@@ -48,8 +48,8 @@ void cholmod_configure_matrix(CholmodType& mat)
 /** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object.
 * Note that the data are shared.
 */
-template<typename _Scalar, int _Options, typename _Index>
-cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
+template<typename _Scalar, int _Options, typename _StorageIndex>
+cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_StorageIndex>& mat)
 {
 cholmod_sparse res;
 res.nzmax = mat.nonZeros();
@@ -74,11 +74,11 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
 res.dtype = 0;
 res.stype = -1;

-if (internal::is_same<_Index,int>::value)
+if (internal::is_same<_StorageIndex,int>::value)
 {
 res.itype = CHOLMOD_INT;
 }
-else if (internal::is_same<_Index,UF_long>::value)
+else if (internal::is_same<_StorageIndex,UF_long>::value)
 {
 res.itype = CHOLMOD_LONG;
 }
@@ -138,12 +138,12 @@ cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)

 /** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix.
 * The data are not copied but shared. */
-template<typename Scalar, int Flags, typename Index>
-MappedSparseMatrix<Scalar,Flags,Index> viewAsEigen(cholmod_sparse& cm)
+template<typename Scalar, int Flags, typename StorageIndex>
+MappedSparseMatrix<Scalar,Flags,StorageIndex> viewAsEigen(cholmod_sparse& cm)
 {
-return MappedSparseMatrix<Scalar,Flags,Index>
-(cm.nrow, cm.ncol, static_cast<Index*>(cm.p)[cm.ncol],
-static_cast<Index*>(cm.p), static_cast<Index*>(cm.i),static_cast<Scalar*>(cm.x) );
+return MappedSparseMatrix<Scalar,Flags,StorageIndex>
+(cm.nrow, cm.ncol, static_cast<StorageIndex*>(cm.p)[cm.ncol],
+static_cast<StorageIndex*>(cm.p), static_cast<StorageIndex*>(cm.i),static_cast<Scalar*>(cm.x) );
 }

 enum CholmodMode {
@@ -169,7 +169,7 @@ class CholmodBase : public SparseSolverBase<Derived>
 typedef typename MatrixType::Scalar Scalar;
 typedef typename MatrixType::RealScalar RealScalar;
 typedef MatrixType CholMatrixType;
-typedef typename MatrixType::Index Index;
+typedef typename MatrixType::StorageIndex StorageIndex;

 public:

@@ -195,8 +195,8 @@ class CholmodBase : public SparseSolverBase<Derived>
 cholmod_finish(&m_cholmod);
 }

-inline Index cols() const { return m_cholmodFactor->n; }
-inline Index rows() const { return m_cholmodFactor->n; }
+inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
+inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }

 /** \brief Reports whether previous computation was successful.
 *
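The CholmodSupport hunks above combine both halves of the change: the wrapper's itype is selected from _StorageIndex at compile time, and CHOLMOD's dimensions are narrowed back through internal::convert_index. A hedged sketch of that compile-time selection pattern (the enum, the long stand-in for UF_long, and the fallback are simplified assumptions, not CHOLMOD's actual definitions):

#include <type_traits>

// Simplified stand-ins for CHOLMOD's integer kinds.
enum ItypeKind { kCholmodInt, kCholmodLong };

// SuiteSparseLong stands in for UF_long / SuiteSparse_long.
typedef long SuiteSparseLong;

// Pick the integer kind from the matrix's StorageIndex at compile time.
template<typename StorageIndex>
ItypeKind cholmodItype() {
  if (std::is_same<StorageIndex, int>::value)
    return kCholmodInt;   // 32-bit index arrays
  else if (std::is_same<StorageIndex, SuiteSparseLong>::value)
    return kCholmodLong;  // long index arrays
  else
    return kCholmodInt;   // fallback for this sketch only
}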
@@ -50,7 +50,6 @@ template<typename Derived> class ArrayBase
 typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;

 typedef typename internal::traits<Derived>::StorageKind StorageKind;
-typedef typename internal::traits<Derived>::Index Index;
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename internal::packet_traits<Scalar>::type PacketScalar;
 typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -179,20 +179,20 @@ struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, Stop, Sto
 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { }
 };

-template<typename Kernel, int Index, int Stop>
+template<typename Kernel, int Index_, int Stop>
 struct copy_using_evaluator_DefaultTraversal_InnerUnrolling
 {
-EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, typename Kernel::Index outer)
+EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer)
 {
-kernel.assignCoeffByOuterInner(outer, Index);
-copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Index+1, Stop>::run(kernel, outer);
+kernel.assignCoeffByOuterInner(outer, Index_);
+copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Index_+1, Stop>::run(kernel, outer);
 }
 };

 template<typename Kernel, int Stop>
 struct copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Stop, Stop>
 {
-EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, typename Kernel::Index) { }
+EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) { }
 };

 /***********************
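A note on the Index to Index_ renames in these unrolling helpers: the identifier Index now names a type (the global index typedef), so a non-type template parameter with the same name would shadow it inside the struct. A hedged, self-contained sketch of the pattern; the kernel and names are invented for illustration:

#include <cstddef>
#include <iostream>

typedef std::ptrdiff_t Index; // stands in for the global Eigen::Index

struct DemoKernel {
  void assignCoeffByOuterInner(Index outer, Index inner) {
    std::cout << "(" << outer << "," << inner << ")\n";
  }
};

// The counter is Index_ so that plain Index keeps meaning the type above.
template<typename Kernel, int Index_, int Stop>
struct InnerUnrolling {
  static void run(Kernel& kernel, Index outer) {
    kernel.assignCoeffByOuterInner(outer, Index_);
    InnerUnrolling<Kernel, Index_ + 1, Stop>::run(kernel, outer);
  }
};

template<typename Kernel, int Stop>
struct InnerUnrolling<Kernel, Stop, Stop> { // terminates the recursion
  static void run(Kernel&, Index) {}
};

int main() {
  DemoKernel k;
  InnerUnrolling<DemoKernel, 0, 4>::run(k, 7); // prints (7,0) ... (7,3)
  return 0;
}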
@@ -246,13 +246,13 @@ struct copy_using_evaluator_innervec_CompleteUnrolling<Kernel, Stop, Stop>
 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { }
 };

-template<typename Kernel, int Index, int Stop>
+template<typename Kernel, int Index_, int Stop>
 struct copy_using_evaluator_innervec_InnerUnrolling
 {
-EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, typename Kernel::Index outer)
+EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer)
 {
-kernel.template assignPacketByOuterInner<Aligned, Aligned>(outer, Index);
-enum { NextIndex = Index + packet_traits<typename Kernel::Scalar>::size };
+kernel.template assignPacketByOuterInner<Aligned, Aligned>(outer, Index_);
+enum { NextIndex = Index_ + packet_traits<typename Kernel::Scalar>::size };
 copy_using_evaluator_innervec_InnerUnrolling<Kernel, NextIndex, Stop>::run(kernel, outer);
 }
 };
@@ -260,7 +260,7 @@ struct copy_using_evaluator_innervec_InnerUnrolling
 template<typename Kernel, int Stop>
 struct copy_using_evaluator_innervec_InnerUnrolling<Kernel, Stop, Stop>
 {
-EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, typename Kernel::Index) { }
+EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, Index) { }
 };

 /***************************************************************************
@@ -283,8 +283,6 @@ struct dense_assignment_loop<Kernel, DefaultTraversal, NoUnrolling>
 {
 EIGEN_DEVICE_FUNC static void run(Kernel &kernel)
 {
-typedef typename Kernel::Index Index;
-
 for(Index outer = 0; outer < kernel.outerSize(); ++outer) {
 for(Index inner = 0; inner < kernel.innerSize(); ++inner) {
 kernel.assignCoeffByOuterInner(outer, inner);
@@ -306,7 +304,7 @@ struct dense_assignment_loop<Kernel, DefaultTraversal, CompleteUnrolling>
 template<typename Kernel>
 struct dense_assignment_loop<Kernel, DefaultTraversal, InnerUnrolling>
 {
-typedef typename Kernel::Index Index;
+typedef typename Kernel::StorageIndex StorageIndex;
 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
 {
 typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
@@ -330,7 +328,7 @@ struct unaligned_dense_assignment_loop
 {
 // if IsAligned = true, then do nothing
 template <typename Kernel>
-EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, typename Kernel::Index, typename Kernel::Index) {}
+EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index, Index) {}
 };

 template <>
@@ -342,16 +340,16 @@ struct unaligned_dense_assignment_loop<false>
 #if EIGEN_COMP_MSVC
 template <typename Kernel>
 static EIGEN_DONT_INLINE void run(Kernel &kernel,
-typename Kernel::Index start,
-typename Kernel::Index end)
+Index start,
+Index end)
 #else
 template <typename Kernel>
 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel,
-typename Kernel::Index start,
-typename Kernel::Index end)
+Index start,
+Index end)
 #endif
 {
-for (typename Kernel::Index index = start; index < end; ++index)
+for (Index index = start; index < end; ++index)
 kernel.assignCoeff(index);
 }
 };
@@ -361,8 +359,6 @@ struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, NoUnrolling>
 {
 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
 {
-typedef typename Kernel::Index Index;
-
 const Index size = kernel.size();
 typedef packet_traits<typename Kernel::Scalar> PacketTraits;
 enum {
@@ -386,7 +382,7 @@ struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, NoUnrolling>
 template<typename Kernel>
 struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, CompleteUnrolling>
 {
-typedef typename Kernel::Index Index;
+typedef typename Kernel::StorageIndex StorageIndex;
 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
 {
 typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
@@ -409,8 +405,6 @@ struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, NoUnrolling>
 {
 EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel)
 {
-typedef typename Kernel::Index Index;
-
 const Index innerSize = kernel.innerSize();
 const Index outerSize = kernel.outerSize();
 const Index packetSize = packet_traits<typename Kernel::Scalar>::size;
@@ -433,7 +427,7 @@ struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, CompleteUnrolling
 template<typename Kernel>
 struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, InnerUnrolling>
 {
-typedef typename Kernel::Index Index;
+typedef typename Kernel::StorageIndex StorageIndex;
 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
 {
 typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
@@ -452,7 +446,6 @@ struct dense_assignment_loop<Kernel, LinearTraversal, NoUnrolling>
 {
 EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel)
 {
-typedef typename Kernel::Index Index;
 const Index size = kernel.size();
 for(Index i = 0; i < size; ++i)
 kernel.assignCoeff(i);
@@ -478,7 +471,6 @@ struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, NoUnrolling>
 {
 EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel)
 {
-typedef typename Kernel::Index Index;
 typedef packet_traits<typename Kernel::Scalar> PacketTraits;
 enum {
 packetSize = PacketTraits::size,
@@ -533,7 +525,7 @@ public:
 typedef DstEvaluatorTypeT DstEvaluatorType;
 typedef SrcEvaluatorTypeT SrcEvaluatorType;
 typedef typename DstEvaluatorType::Scalar Scalar;
-typedef typename DstEvaluatorType::Index Index;
+typedef typename DstEvaluatorType::StorageIndex StorageIndex;
 typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor> AssignmentTraits;

@@ -731,8 +723,8 @@ EIGEN_DEVICE_FUNC void call_assignment_no_alias(Dst& dst, const Src& src, const
 && int(Dst::SizeAtCompileTime) != 1
 };

-typename Dst::Index dstRows = NeedToTranspose ? src.cols() : src.rows();
-typename Dst::Index dstCols = NeedToTranspose ? src.rows() : src.cols();
+Index dstRows = NeedToTranspose ? src.cols() : src.rows();
+Index dstCols = NeedToTranspose ? src.rows() : src.cols();
 if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
 dst.resize(dstRows, dstCols);

@@ -32,7 +32,7 @@ class BandMatrixBase : public EigenBase<Derived>
 };
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
-typedef typename DenseMatrixType::Index Index;
+typedef typename DenseMatrixType::StorageIndex StorageIndex;
 typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType;
 typedef EigenBase<Derived> Base;

@@ -179,7 +179,7 @@ struct traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> >
 {
 typedef _Scalar Scalar;
 typedef Dense StorageKind;
-typedef DenseIndex Index;
+typedef DenseIndex StorageIndex;
 enum {
 CoeffReadCost = NumTraits<Scalar>::ReadCost,
 RowsAtCompileTime = _Rows,
@@ -201,7 +201,7 @@ class BandMatrix : public BandMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Sub
 public:

 typedef typename internal::traits<BandMatrix>::Scalar Scalar;
-typedef typename internal::traits<BandMatrix>::Index Index;
+typedef typename internal::traits<BandMatrix>::StorageIndex StorageIndex;
 typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType;

 explicit inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs)
@@ -241,7 +241,7 @@ struct traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Opt
 {
 typedef typename _CoefficientsType::Scalar Scalar;
 typedef typename _CoefficientsType::StorageKind StorageKind;
-typedef typename _CoefficientsType::Index Index;
+typedef typename _CoefficientsType::StorageIndex StorageIndex;
 enum {
 CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost,
 RowsAtCompileTime = _Rows,
@@ -264,7 +264,7 @@ class BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsT

 typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar;
 typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType;
-typedef typename internal::traits<BandMatrixWrapper>::Index Index;
+typedef typename internal::traits<BandMatrixWrapper>::StorageIndex StorageIndex;

 explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs)
 : m_coeffs(coeffs),
@@ -312,7 +312,7 @@ template<typename Scalar, int Size, int Options>
 class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor>
 {
 typedef BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> Base;
-typedef typename Base::Index Index;
+typedef typename Base::StorageIndex StorageIndex;
 public:
 explicit TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {}

@@ -154,7 +154,7 @@ class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
 : public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel>
 {
 typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl;
-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 public:
 typedef Impl Base;
 EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
@@ -306,13 +306,13 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
 }

 EIGEN_DEVICE_FUNC
-Index startRow() const
+StorageIndex startRow() const
 {
 return m_startRow.value();
 }

 EIGEN_DEVICE_FUNC
-Index startCol() const
+StorageIndex startCol() const
 {
 return m_startCol.value();
 }
@@ -320,10 +320,10 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
 protected:

 const typename XprType::Nested m_xpr;
-const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
-const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
-const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
-const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+const internal::variable_if_dynamic<StorageIndex, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+const internal::variable_if_dynamic<StorageIndex, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+const internal::variable_if_dynamic<StorageIndex, RowsAtCompileTime> m_blockRows;
+const internal::variable_if_dynamic<StorageIndex, ColsAtCompileTime> m_blockCols;
 };

 /** \internal Internal implementation of dense Blocks in the direct access case.*/
@@ -28,7 +28,7 @@ template<typename XprType>
 struct CommaInitializer
 {
 typedef typename XprType::Scalar Scalar;
-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;

 EIGEN_DEVICE_FUNC
 inline CommaInitializer(XprType& xpr, const Scalar& s)
@@ -111,7 +111,7 @@ struct evaluator_base
 typedef evaluator<ExpressionType> type;
 typedef evaluator<ExpressionType> nestedType;

-typedef typename traits<ExpressionType>::Index Index;
+typedef typename traits<ExpressionType>::StorageIndex StorageIndex;
 // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
 typedef traits<ExpressionType> ExpressionTraits;
 };
@@ -128,7 +128,7 @@ struct evaluator<PlainObjectBase<Derived> >
 : evaluator_base<Derived>
 {
 typedef PlainObjectBase<Derived> PlainObjectType;
-typedef typename PlainObjectType::Index Index;
+typedef typename PlainObjectType::StorageIndex StorageIndex;
 typedef typename PlainObjectType::Scalar Scalar;
 typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
 typedef typename PlainObjectType::PacketScalar PacketScalar;
@@ -264,7 +264,7 @@ struct unary_evaluator<Transpose<ArgType>, IndexBased>

 EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}

-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::Scalar Scalar;
 typedef typename XprType::CoeffReturnType CoeffReturnType;
 typedef typename XprType::PacketScalar PacketScalar;
@@ -343,7 +343,7 @@ struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
 : m_functor(n.functor())
 { }

-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::CoeffReturnType CoeffReturnType;
 typedef typename XprType::PacketScalar PacketScalar;

@@ -394,7 +394,7 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
 m_argImpl(op.nestedExpression())
 { }

-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::CoeffReturnType CoeffReturnType;
 typedef typename XprType::PacketScalar PacketScalar;

@@ -469,7 +469,7 @@ struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBase
 m_rhsImpl(xpr.rhs())
 { }

-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::CoeffReturnType CoeffReturnType;
 typedef typename XprType::PacketScalar PacketScalar;

@@ -522,7 +522,7 @@ struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
 m_argImpl(op.nestedExpression())
 { }

-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::Scalar Scalar;
 typedef typename XprType::CoeffReturnType CoeffReturnType;

@@ -563,7 +563,7 @@ struct mapbase_evaluator : evaluator_base<Derived>
 {
 typedef Derived XprType;
 typedef typename XprType::PointerType PointerType;
-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::Scalar Scalar;
 typedef typename XprType::CoeffReturnType CoeffReturnType;
 typedef typename XprType::PacketScalar PacketScalar;
@@ -760,7 +760,7 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
 m_startCol(block.startCol())
 { }

-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::Scalar Scalar;
 typedef typename XprType::CoeffReturnType CoeffReturnType;
 typedef typename XprType::PacketScalar PacketScalar;
@@ -865,7 +865,7 @@ struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
 m_elseImpl(select.elseMatrix())
 { }

-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::CoeffReturnType CoeffReturnType;

 inline EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const
@@ -898,7 +898,7 @@ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
 : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
 {
 typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::CoeffReturnType CoeffReturnType;
 typedef typename XprType::PacketReturnType PacketReturnType;
 enum {
@@ -981,7 +981,7 @@ struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
 : m_expr(expr)
 {}

-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::CoeffReturnType CoeffReturnType;

 EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const
@@ -1016,7 +1016,7 @@ struct evaluator_wrapper_base

 EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}

-typedef typename ArgType::Index Index;
+typedef typename ArgType::StorageIndex StorageIndex;
 typedef typename ArgType::Scalar Scalar;
 typedef typename ArgType::CoeffReturnType CoeffReturnType;
 typedef typename ArgType::PacketScalar PacketScalar;
@@ -1103,7 +1103,7 @@ struct unary_evaluator<Reverse<ArgType, Direction> >
 : evaluator_base<Reverse<ArgType, Direction> >
 {
 typedef Reverse<ArgType, Direction> XprType;
-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::Scalar Scalar;
 typedef typename XprType::CoeffReturnType CoeffReturnType;
 typedef typename XprType::PacketScalar PacketScalar;
@@ -1219,7 +1219,7 @@ struct evaluator<Diagonal<ArgType, DiagIndex> >
 m_index(diagonal.index())
 { }

-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::Scalar Scalar;
 typedef typename XprType::CoeffReturnType CoeffReturnType;

@@ -36,7 +36,7 @@ protected:
 typedef internal::inner_iterator_selector<XprType, typename internal::evaluator_traits<XprType>::Kind> IteratorType;
 typedef typename internal::evaluator<XprType>::type EvaluatorType;
 typedef typename internal::traits<XprType>::Scalar Scalar;
-typedef typename internal::traits<XprType>::Index Index;
+typedef typename internal::traits<XprType>::StorageIndex StorageIndex;
 public:
 /** Construct an iterator over the \a outerId -th row or column of \a xpr */
 InnerIterator(const XprType &xpr, const Index &outerId)
@@ -50,11 +50,11 @@ public:
 */
 EIGEN_STRONG_INLINE InnerIterator& operator++() { m_iter.operator++(); return *this; }
 /// \returns the column or row index of the current coefficient.
-EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
+EIGEN_STRONG_INLINE StorageIndex index() const { return m_iter.index(); }
 /// \returns the row index of the current coefficient.
-EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
+EIGEN_STRONG_INLINE StorageIndex row() const { return m_iter.row(); }
 /// \returns the column index of the current coefficient.
-EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
+EIGEN_STRONG_INLINE StorageIndex col() const { return m_iter.col(); }
 /// \returns \c true if the iterator \c *this still references a valid coefficient.
 EIGEN_STRONG_INLINE operator bool() const { return m_iter; }

@@ -77,7 +77,7 @@ class inner_iterator_selector<XprType, IndexBased>
 protected:
 typedef typename evaluator<XprType>::type EvaluatorType;
 typedef typename traits<XprType>::Scalar Scalar;
-typedef typename traits<XprType>::Index Index;
+typedef typename traits<XprType>::StorageIndex StorageIndex;
 enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };

 public:
@@ -93,9 +93,9 @@ public:

 EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; }

-EIGEN_STRONG_INLINE Index index() const { return m_inner; }
-inline Index row() const { return IsRowMajor ? m_outer : index(); }
-inline Index col() const { return IsRowMajor ? index() : m_outer; }
+EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; }
+inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); }
+inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; }

 EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }

@@ -115,7 +115,7 @@ class inner_iterator_selector<XprType, IteratorBased>
 protected:
 typedef typename evaluator<XprType>::InnerIterator Base;
 typedef typename evaluator<XprType>::type EvaluatorType;
-typedef typename traits<XprType>::Index Index;
+typedef typename traits<XprType>::StorageIndex StorageIndex;

 public:
 EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/)
@@ -59,8 +59,8 @@ struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
 typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind,
 typename traits<Rhs>::StorageKind,
 BinaryOp>::ret StorageKind;
-typedef typename promote_index_type<typename traits<Lhs>::Index,
-typename traits<Rhs>::Index>::type Index;
+typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,
+typename traits<Rhs>::StorageIndex>::type StorageIndex;
 typedef typename Lhs::Nested LhsNested;
 typedef typename Rhs::Nested RhsNested;
 typedef typename remove_reference<LhsNested>::type _LhsNested;
@@ -111,7 +111,7 @@ class CwiseBinaryOp :
 }

 EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE Index rows() const {
+EIGEN_STRONG_INLINE StorageIndex rows() const {
 // return the fixed size type if available to enable compile time optimizations
 if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic)
 return m_rhs.rows();
@@ -119,7 +119,7 @@ class CwiseBinaryOp :
 return m_lhs.rows();
 }
 EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE Index cols() const {
+EIGEN_STRONG_INLINE StorageIndex cols() const {
 // return the fixed size type if available to enable compile time optimizations
 if (internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic)
 return m_rhs.cols();
@@ -66,9 +66,9 @@ class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal
 : m_xpr(xpr), m_functor(func) {}

 EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
+EIGEN_STRONG_INLINE StorageIndex rows() const { return m_xpr.rows(); }
 EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }
+EIGEN_STRONG_INLINE StorageIndex cols() const { return m_xpr.cols(); }

 /** \returns the functor representing the unary operation */
 EIGEN_DEVICE_FUNC
@@ -58,11 +58,19 @@ template<typename Derived> class DenseBase

 typedef typename internal::traits<Derived>::StorageKind StorageKind;

-/** \brief The type of indices
+/** \brief The interface type of indices
 * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
-* \sa \ref TopicPreprocessorDirectives.
+* \sa \ref TopicPreprocessorDirectives, StorageIndex.
 */
-typedef typename internal::traits<Derived>::Index Index;
+typedef Eigen::Index Index;
+
+/**
+* \brief The type used to store indices
+* \details This typedef is relevant for types that store multiple indices such as
+* PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index
+* \sa \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase.
+*/
+typedef typename internal::traits<Derived>::StorageIndex StorageIndex;

 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename internal::packet_traits<Scalar>::type PacketScalar;
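In user code, the distinction documented above looks like this; a small sketch against the post-change API (the matrix types are real Eigen types in versions carrying this commit, the variable names are ours):

#include <Eigen/Sparse>
#include <iostream>

int main() {
  // Dense side: Index is the interface type, Eigen::Index
  // (std::ptrdiff_t unless EIGEN_DEFAULT_DENSE_INDEX_TYPE overrides it).
  Eigen::MatrixXd d(3, 4);
  Eigen::Index r = d.rows();

  // Sparse side: StorageIndex is the per-object stored type (int here),
  // independent of the Index type used for sizes and loops.
  Eigen::SparseMatrix<double, Eigen::ColMajor, int> s(3, 4);
  Eigen::SparseMatrix<double, Eigen::ColMajor, int>::StorageIndex si = 0;

  std::cout << r << " " << si << std::endl;
  return 0;
}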
@@ -35,7 +35,6 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
 {
 public:
 typedef typename internal::traits<Derived>::StorageKind StorageKind;
-typedef typename internal::traits<Derived>::Index Index;
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename internal::packet_traits<Scalar>::type PacketScalar;

@@ -287,7 +286,6 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
 typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;

 typedef typename internal::traits<Derived>::StorageKind StorageKind;
-typedef typename internal::traits<Derived>::Index Index;
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename internal::packet_traits<Scalar>::type PacketScalar;
 typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -450,7 +448,6 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
 public:

 typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
-typedef typename internal::traits<Derived>::Index Index;
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename NumTraits<Scalar>::Real RealScalar;

@@ -525,7 +522,6 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
 public:

 typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
-typedef typename internal::traits<Derived>::Index Index;
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename NumTraits<Scalar>::Real RealScalar;

@@ -70,28 +70,28 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
 EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)

 EIGEN_DEVICE_FUNC
-explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {}
+explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(internal::convert_index<StorageIndex>(a_index)) {}

 EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)

 EIGEN_DEVICE_FUNC
-inline Index rows() const
+inline StorageIndex rows() const
 {
-return m_index.value()<0 ? numext::mini(Index(m_matrix.cols()),Index(m_matrix.rows()+m_index.value()))
-: numext::mini(Index(m_matrix.rows()),Index(m_matrix.cols()-m_index.value()));
+return m_index.value()<0 ? numext::mini<StorageIndex>(m_matrix.cols(),m_matrix.rows()+m_index.value())
+: numext::mini<StorageIndex>(m_matrix.rows(),m_matrix.cols()-m_index.value());
 }

 EIGEN_DEVICE_FUNC
-inline Index cols() const { return 1; }
+inline StorageIndex cols() const { return 1; }

 EIGEN_DEVICE_FUNC
-inline Index innerStride() const
+inline StorageIndex innerStride() const
 {
 return m_matrix.outerStride() + 1;
 }

 EIGEN_DEVICE_FUNC
-inline Index outerStride() const
+inline StorageIndex outerStride() const
 {
 return 0;
 }
@@ -153,23 +153,23 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
 }

 EIGEN_DEVICE_FUNC
-inline Index index() const
+inline StorageIndex index() const
 {
 return m_index.value();
 }

 protected:
 typename MatrixType::Nested m_matrix;
-const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;
+const internal::variable_if_dynamicindex<StorageIndex, DiagIndex> m_index;

 private:
 // some compilers may fail to optimize std::max etc in case of compile-time constants...
 EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
+EIGEN_STRONG_INLINE StorageIndex absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
 EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
+EIGEN_STRONG_INLINE StorageIndex rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
 EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
+EIGEN_STRONG_INLINE StorageIndex colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
 // trigger a compile time error is someone try to call packet
 template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const;
 template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const;
@@ -22,7 +22,7 @@ class DiagonalBase : public EigenBase<Derived>
 typedef typename DiagonalVectorType::Scalar Scalar;
 typedef typename DiagonalVectorType::RealScalar RealScalar;
 typedef typename internal::traits<Derived>::StorageKind StorageKind;
-typedef typename internal::traits<Derived>::Index Index;
+typedef typename internal::traits<Derived>::StorageIndex StorageIndex;

 enum {
 RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
@@ -108,7 +108,7 @@ struct traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
 {
 typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
 typedef DiagonalShape StorageKind;
-typedef DenseIndex Index;
+// typedef DenseIndex Index;
 enum {
 Flags = LvalueBit | NoPreferredStorageOrderBit
 };
@@ -124,7 +124,7 @@ class DiagonalMatrix
 typedef const DiagonalMatrix& Nested;
 typedef _Scalar Scalar;
 typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;
-typedef typename internal::traits<DiagonalMatrix>::Index Index;
+typedef typename internal::traits<DiagonalMatrix>::StorageIndex StorageIndex;
 #endif

 protected:
@@ -230,7 +230,7 @@ struct traits<DiagonalWrapper<_DiagonalVectorType> >
 {
 typedef _DiagonalVectorType DiagonalVectorType;
 typedef typename DiagonalVectorType::Scalar Scalar;
-typedef typename DiagonalVectorType::Index Index;
+typedef typename DiagonalVectorType::StorageIndex StorageIndex;
 typedef DiagonalShape StorageKind;
 typedef typename traits<DiagonalVectorType>::XprKind XprKind;
 enum {
@@ -28,7 +28,7 @@ template<typename Derived> struct EigenBase
 // typedef typename internal::plain_matrix_type<Derived>::type PlainObject;

 typedef typename internal::traits<Derived>::StorageKind StorageKind;
-typedef typename internal::traits<Derived>::Index Index;
+typedef typename internal::traits<Derived>::StorageIndex StorageIndex;

 /** \returns a reference to the derived object */
 EIGEN_DEVICE_FUNC
@@ -46,14 +46,14 @@ template<typename Derived> struct EigenBase

 /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
 EIGEN_DEVICE_FUNC
-inline Index rows() const { return derived().rows(); }
+inline StorageIndex rows() const { return derived().rows(); }
 /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
 EIGEN_DEVICE_FUNC
-inline Index cols() const { return derived().cols(); }
+inline StorageIndex cols() const { return derived().cols(); }
 /** \returns the number of coefficients, which is rows()*cols().
 * \sa rows(), cols(), SizeAtCompileTime. */
 EIGEN_DEVICE_FUNC
-inline Index size() const { return rows() * cols(); }
+inline StorageIndex size() const { return rows() * cols(); }

 /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */
 template<typename Dest>
@@ -37,7 +37,6 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
 };

 typedef typename internal::traits<Derived>::StorageKind StorageKind;
-typedef typename internal::traits<Derived>::Index Index;
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename internal::packet_traits<Scalar>::type PacketScalar;
 typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -107,7 +107,7 @@ struct traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
 {
 typedef _Scalar Scalar;
 typedef Dense StorageKind;
-typedef DenseIndex Index;
+typedef DenseIndex StorageIndex;
 typedef MatrixXpr XprKind;
 enum {
 RowsAtCompileTime = _Rows,
@@ -52,7 +52,8 @@ template<typename Derived> class MatrixBase
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 typedef MatrixBase StorageBaseType;
 typedef typename internal::traits<Derived>::StorageKind StorageKind;
-typedef typename internal::traits<Derived>::Index Index;
+typedef Eigen::Index Index;
+typedef Index StorageIndex;
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename internal::packet_traits<Scalar>::type PacketScalar;
 typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -67,7 +67,7 @@ class PermutationBase : public EigenBase<Derived>
 MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
 };
 typedef typename Traits::StorageIndexType StorageIndexType;
-typedef typename Traits::Index Index;
+typedef typename Traits::StorageIndex StorageIndex;
 typedef Matrix<StorageIndexType,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>
 DenseMatrixType;
 typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,StorageIndexType>
@@ -277,7 +277,7 @@ struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _Storag
 {
 typedef PermutationStorage StorageKind;
 typedef Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
-typedef typename IndicesType::Index Index;
+typedef typename IndicesType::StorageIndex StorageIndex;
 typedef _StorageIndexType StorageIndexType;
 };
 }
@@ -294,7 +294,7 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 typedef typename Traits::IndicesType IndicesType;
 typedef typename Traits::StorageIndexType StorageIndexType;
-typedef typename Traits::Index Index;
+typedef typename Traits::StorageIndex StorageIndex;
 #endif

 inline PermutationMatrix()
@@ -402,7 +402,7 @@ struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _St
 {
 typedef PermutationStorage StorageKind;
 typedef Map<const Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;
-typedef typename IndicesType::Index Index;
+typedef typename IndicesType::StorageIndex StorageIndex;
 typedef _StorageIndexType StorageIndexType;
 };
 }
@@ -418,7 +418,7 @@ class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageInd
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 typedef typename Traits::IndicesType IndicesType;
 typedef typename IndicesType::Scalar StorageIndexType;
-typedef typename IndicesType::Index Index;
+typedef typename IndicesType::StorageIndex StorageIndex;
 #endif

 inline Map(const StorageIndexType* indicesPtr)
@@ -480,7 +480,7 @@ struct traits<PermutationWrapper<_IndicesType> >
 typedef PermutationStorage StorageKind;
 typedef typename _IndicesType::Scalar Scalar;
 typedef typename _IndicesType::Scalar StorageIndexType;
-typedef typename _IndicesType::Index Index;
+typedef typename _IndicesType::StorageIndex StorageIndex;
 typedef _IndicesType IndicesType;
 enum {
 RowsAtCompileTime = _IndicesType::SizeAtCompileTime,
@@ -558,7 +558,7 @@ struct permut_matrix_product_retval
 : public ReturnByValue<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
 {
 typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
-typedef typename MatrixType::Index Index;
+typedef typename MatrixType::StorageIndex StorageIndex;

 permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
 : m_permutation(perm), m_matrix(matrix)
@@ -650,7 +650,7 @@ class Transpose<PermutationBase<Derived> >
 MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
 };
 typedef typename Traits::Scalar Scalar;
-typedef typename Traits::Index Index;
+typedef typename Traits::StorageIndex StorageIndex;
 #endif

 Transpose(const PermutationType& p) : m_permutation(p) {}
@@ -95,7 +95,6 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
 typedef typename internal::dense_xpr_base<Derived>::type Base;

 typedef typename internal::traits<Derived>::StorageKind StorageKind;
-typedef typename internal::traits<Derived>::Index Index;
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename internal::packet_traits<Scalar>::type PacketScalar;
 typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -846,7 +845,6 @@ namespace internal {
 template <typename Derived, typename OtherDerived, bool IsVector>
 struct conservative_resize_like_impl
 {
-typedef typename Derived::Index Index;
 static void run(DenseBase<Derived>& _this, Index rows, Index cols)
 {
 if (_this.rows() == rows && _this.cols() == cols) return;
@@ -912,7 +910,6 @@ struct conservative_resize_like_impl<Derived,OtherDerived,true>
 {
 using conservative_resize_like_impl<Derived,OtherDerived,false>::run;

-typedef typename Derived::Index Index;
 static void run(DenseBase<Derived>& _this, Index size)
 {
 const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
@@ -67,8 +67,8 @@ struct traits<Product<Lhs, Rhs, Option> >
 typedef typename product_promote_storage_type<typename LhsTraits::StorageKind,
 typename RhsTraits::StorageKind,
 internal::product_type<Lhs,Rhs>::ret>::ret StorageKind;
-typedef typename promote_index_type<typename LhsTraits::Index,
-typename RhsTraits::Index>::type Index;
+typedef typename promote_index_type<typename LhsTraits::StorageIndex,
+typename RhsTraits::StorageIndex>::type StorageIndex;

 enum {
 RowsAtCompileTime = LhsTraits::RowsAtCompileTime,
@@ -120,8 +120,8 @@ class Product : public ProductImpl<_Lhs,_Rhs,Option,
 && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
 }

-EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
-EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
+EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return m_lhs.rows(); }
+EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return m_rhs.cols(); }

 EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; }
 EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; }
@@ -149,7 +149,7 @@ class dense_product_base<Lhs, Rhs, Option, InnerProduct>
 public:
 using Base::derived;
 typedef typename Base::Scalar Scalar;
-typedef typename Base::Index Index;
+typedef typename Base::StorageIndex StorageIndex;

 operator const Scalar() const
 {
@@ -210,7 +210,6 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct>
 template<typename Dst, typename Lhs, typename Rhs, typename Func>
 EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&)
 {
-typedef typename Dst::Index Index;
 // FIXME make sure lhs is sequentially stored
 // FIXME not very good if rhs is real and lhs complex while alpha is real too
 // FIXME we should probably build an evaluator for dst and rhs
@@ -222,7 +221,6 @@ EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, cons
 // Row major result
 template<typename Dst, typename Lhs, typename Rhs, typename Func>
 EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&) {
-typedef typename Dst::Index Index;
 // FIXME make sure rhs is sequentially stored
 // FIXME not very good if lhs is real and rhs complex while alpha is real too
 // FIXME we should probably build an evaluator for dst and lhs
@@ -372,7 +370,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
 : evaluator_base<Product<Lhs, Rhs, LazyProduct> >
 {
 typedef Product<Lhs, Rhs, LazyProduct> XprType;
-typedef typename XprType::Index Index;
+typedef typename XprType::StorageIndex StorageIndex;
 typedef typename XprType::Scalar Scalar;
 typedef typename XprType::CoeffReturnType CoeffReturnType;
 typedef typename XprType::PacketScalar PacketScalar;
@@ -524,7 +522,7 @@ struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, LazyCoeffBasedProduc
 template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
 struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
 {
-typedef typename Lhs::Index Index;
+typedef typename Lhs::StorageIndex StorageIndex;
 static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
 {
 etor_product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
@@ -535,7 +533,7 @@ struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, Load
 template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
 struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
 {
-typedef typename Lhs::Index Index;
+typedef typename Lhs::StorageIndex StorageIndex;
 static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
 {
 etor_product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
@@ -546,7 +544,7 @@ struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, Load
 template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
 struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
 {
-typedef typename Lhs::Index Index;
+typedef typename Lhs::StorageIndex StorageIndex;
 static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
 {
 res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
@@ -556,7 +554,7 @@ struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
 template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
 struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
 {
-typedef typename Lhs::Index Index;
+typedef typename Lhs::StorageIndex StorageIndex;
 static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
 {
 res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
@@ -566,7 +564,7 @@ struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
 template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
 struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
 {
-typedef typename Lhs::Index Index;
+typedef typename Lhs::StorageIndex StorageIndex;
 static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
 {
 eigen_assert(innerDim>0 && "you are using a non initialized matrix");
@@ -579,7 +577,7 @@ struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
 template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
 struct etor_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
 {
-typedef typename Lhs::Index Index;
+typedef typename Lhs::StorageIndex StorageIndex;
 static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
 {
 eigen_assert(innerDim>0 && "you are using a non initialized matrix");
@@ -668,7 +666,7 @@ template<typename MatrixType, typename DiagonalType, typename Derived, int Produ
 struct diagonal_product_evaluator_base
 : evaluator_base<Derived>
 {
-typedef typename MatrixType::Index Index;
+typedef typename MatrixType::StorageIndex StorageIndex;
 typedef typename scalar_product_traits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
 typedef typename internal::packet_traits<Scalar>::type PacketScalar;
 public:
@@ -733,7 +731,7 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalSha
 using Base::coeff;
 using Base::packet_impl;
 typedef typename Base::Scalar Scalar;
-typedef typename Base::Index Index;
+typedef typename Base::StorageIndex StorageIndex;
 typedef typename Base::PacketScalar PacketScalar;

 typedef Product<Lhs, Rhs, ProductKind> XprType;
@@ -781,7 +779,7 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DenseShape,
 using Base::coeff;
 using Base::packet_impl;
 typedef typename Base::Scalar Scalar;
-typedef typename Base::Index Index;
+typedef typename Base::StorageIndex StorageIndex;
 typedef typename Base::PacketScalar PacketScalar;

 typedef Product<Lhs, Rhs, ProductKind> XprType;
@@ -61,8 +61,8 @@ template<typename Derived> class ReturnByValue
 EIGEN_DEVICE_FUNC
 inline void evalTo(Dest& dst) const
 { static_cast<const Derived*>(this)->evalTo(dst); }
-EIGEN_DEVICE_FUNC inline Index rows() const { return static_cast<const Derived*>(this)->rows(); }
-EIGEN_DEVICE_FUNC inline Index cols() const { return static_cast<const Derived*>(this)->cols(); }
+EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return static_cast<const Derived*>(this)->rows(); }
+EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return static_cast<const Derived*>(this)->cols(); }

 #ifndef EIGEN_PARSED_BY_DOXYGEN
 #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
@@ -59,7 +59,7 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
 /** \brief The type of coefficients in this matrix */
 typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;

-typedef typename MatrixType::Index Index;
+typedef typename MatrixType::StorageIndex StorageIndex;

 enum {
 Mode = internal::traits<SelfAdjointView>::Mode,
@@ -224,7 +224,7 @@ public:
 typedef typename Base::DstEvaluatorType DstEvaluatorType;
 typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
 typedef typename Base::Scalar Scalar;
-typedef typename Base::Index Index;
+typedef typename Base::StorageIndex StorageIndex;
 typedef typename Base::AssignmentTraits AssignmentTraits;

@ -48,6 +48,7 @@ struct traits<Solve<Decomposition, RhsType> >
: traits<typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject>
{
typedef typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject PlainObject;
typedef typename promote_index_type<typename Decomposition::StorageIndex, typename RhsType::StorageIndex>::type StorageIndex;
typedef traits<PlainObject> BaseTraits;
enum {
Flags = BaseTraits::Flags & RowMajorBit,
@ -62,15 +63,15 @@ template<typename Decomposition, typename RhsType>
class Solve : public SolveImpl<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>
{
public:
typedef typename RhsType::Index Index;
typedef typename internal::traits<Solve>::PlainObject PlainObject;
typedef typename internal::traits<Solve>::StorageIndex StorageIndex;

Solve(const Decomposition &dec, const RhsType &rhs)
: m_dec(dec), m_rhs(rhs)
{}

EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }
EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }
EIGEN_DEVICE_FUNC StorageIndex rows() const { return m_dec.cols(); }
EIGEN_DEVICE_FUNC StorageIndex cols() const { return m_rhs.cols(); }

EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; }
EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; }
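The Solve expression is lazy: its dimensions come from the decomposition (rows() is dec().cols()) and the right-hand side, and its StorageIndex is promoted from both operands. A minimal usage sketch against the 3.3-era dense API:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(4);
  // solve(b) returns a Solve<> expression; assignment triggers evaluation
  Eigen::VectorXd x = A.partialPivLu().solve(b);
  return (A*x - b).norm() < 1e-8 ? 0 : 1;
}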
@ -55,7 +55,6 @@ inline typename NumTraits<typename traits<Derived>::Scalar>::Real
blueNorm_impl(const EigenBase<Derived>& _vec)
{
typedef typename Derived::RealScalar RealScalar;
typedef typename Derived::Index Index;
using std::pow;
using std::sqrt;
using std::abs;

@ -28,7 +28,7 @@ protected:

public:
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
typedef typename Base::StorageIndex StorageIndex;
typedef typename Base::DstXprType DstXprType;
typedef swap_assign_op<Scalar> Functor;
@ -29,14 +29,10 @@ namespace Eigen {

namespace internal {
template<typename MatrixType>
struct traits<Transpose<MatrixType> >
struct traits<Transpose<MatrixType> > : public traits<MatrixType>
{
typedef typename traits<MatrixType>::Scalar Scalar;
typedef typename traits<MatrixType>::Index Index;
typedef typename nested<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain;
typedef typename traits<MatrixType>::StorageKind StorageKind;
typedef typename traits<MatrixType>::XprKind XprKind;
enum {
RowsAtCompileTime = MatrixType::ColsAtCompileTime,
ColsAtCompileTime = MatrixType::RowsAtCompileTime,
@ -68,8 +64,8 @@ template<typename MatrixType> class Transpose

EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)

EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); }
EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return m_matrix.cols(); }
EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return m_matrix.rows(); }

/** \returns the nested expression */
EIGEN_DEVICE_FUNC
@ -45,7 +45,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
};
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
typedef typename internal::traits<Derived>::FullMatrixType DenseMatrixType;
typedef DenseMatrixType DenseType;
typedef Derived const& Nested;
@ -54,9 +54,9 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }

EIGEN_DEVICE_FUNC
inline Index rows() const { return derived().rows(); }
inline StorageIndex rows() const { return derived().rows(); }
EIGEN_DEVICE_FUNC
inline Index cols() const { return derived().cols(); }
inline StorageIndex cols() const { return derived().cols(); }
EIGEN_DEVICE_FUNC
inline Index outerStride() const { return derived().outerStride(); }
EIGEN_DEVICE_FUNC
@ -199,7 +199,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
public:

typedef typename internal::traits<TriangularView>::StorageKind StorageKind;
typedef typename internal::traits<TriangularView>::Index Index;
typedef typename internal::traits<TriangularView>::StorageIndex StorageIndex;
typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned NestedExpression;

enum {
@ -222,9 +222,9 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
{ return Base::operator=(other); }

EIGEN_DEVICE_FUNC
inline Index rows() const { return m_matrix.rows(); }
inline StorageIndex rows() const { return m_matrix.rows(); }
EIGEN_DEVICE_FUNC
inline Index cols() const { return m_matrix.cols(); }
inline StorageIndex cols() const { return m_matrix.cols(); }

EIGEN_DEVICE_FUNC
const NestedExpression& nestedExpression() const { return m_matrix; }
@ -325,7 +325,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
using Base::derived;

typedef typename internal::traits<TriangularViewType>::StorageKind StorageKind;
typedef typename internal::traits<TriangularViewType>::Index Index;
typedef typename internal::traits<TriangularViewType>::StorageIndex StorageIndex;

enum {
Mode = _Mode,
@ -688,7 +688,7 @@ public:
typedef typename Base::DstEvaluatorType DstEvaluatorType;
typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
typedef typename Base::StorageIndex StorageIndex;
typedef typename Base::AssignmentTraits AssignmentTraits;

@ -831,7 +831,7 @@ struct triangular_assignment_loop<Kernel, Mode, 0, SetOpposite>
template<typename Kernel, unsigned int Mode, bool SetOpposite>
struct triangular_assignment_loop<Kernel, Mode, Dynamic, SetOpposite>
{
typedef typename Kernel::Index Index;
typedef typename Kernel::StorageIndex StorageIndex;
typedef typename Kernel::Scalar Scalar;
EIGEN_DEVICE_FUNC
static inline void run(Kernel &kernel)
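For reference, the triangular views whose rows()/cols() signatures change above are typically used for in-place substitution; a minimal dense sketch:

#include <Eigen/Dense>

int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Random();
  m.diagonal().array() += 4.0;                       // keep the triangle safely invertible
  Eigen::Vector3d b = Eigen::Vector3d::Ones();
  // back-substitution against the lower triangle of m
  Eigen::Vector3d x = m.triangularView<Eigen::Lower>().solve(b);
  return x.allFinite() ? 0 : 1;
}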
@ -629,7 +629,7 @@ namespace Eigen {
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::internal::nested<Derived>::type Nested; \
typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
typedef typename Eigen::internal::traits<Derived>::Index Index; \
typedef typename Eigen::internal::traits<Derived>::StorageIndex StorageIndex; \
enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
Flags = Eigen::internal::traits<Derived>::Flags, \
@ -639,23 +639,13 @@ namespace Eigen {

#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T were corresponding to RealScalar. */ \
EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
typedef typename Base::PacketScalar PacketScalar; \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::internal::nested<Derived>::type Nested; \
typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
typedef typename Eigen::internal::traits<Derived>::Index Index; \
enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime, \
Flags = Eigen::internal::traits<Derived>::Flags, \
SizeAtCompileTime = Base::SizeAtCompileTime, \
MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
typedef Eigen::Index Index; \
enum { MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime}; \
using Base::derived; \
using Base::const_cast_derived;
using Base::const_cast_derived;

#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
@ -26,8 +26,25 @@ namespace Eigen {

typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;

/**
* \brief The Index type as used for the API.
* \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
* \sa \ref TopicPreprocessorDirectives, StorageIndex.
*/

typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE Index;
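Since Eigen::Index now aliases EIGEN_DEFAULT_DENSE_INDEX_TYPE, the API-wide index type can be overridden before the first Eigen include; a minimal sketch (the override must appear in every translation unit, typically via the build flags):

// must come before any Eigen header
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#include <Eigen/Core>

int main() {
  Eigen::MatrixXd m(2,2);
  Eigen::Index r = m.rows();   // Eigen::Index is now int
  return int(r) == 2 ? 0 : 1;
}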
namespace internal {

template<typename IndexDest, typename IndexSrc>
EIGEN_DEVICE_FUNC
inline IndexDest convert_index(const IndexSrc& idx) {
// for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value to big for target type");
return IndexDest(idx);
}

//classes inheriting no_assignment_operator don't generate a default operator=.
class no_assignment_operator
{
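convert_index is the single funnel through which all index narrowing now goes. An equivalent standalone sketch of its contract, standard library only (convert_index_sketch is a hypothetical name, not Eigen API):

#include <cassert>
#include <cstdint>
#include <limits>

template<typename IndexDest, typename IndexSrc>
IndexDest convert_index_sketch(IndexSrc idx) {
  // widening is free; narrowing must be checked against the target's range
  assert(static_cast<std::intmax_t>(idx)
           <= static_cast<std::intmax_t>(std::numeric_limits<IndexDest>::max())
         && "Index value too big for target type");
  return static_cast<IndexDest>(idx);
}

int main() {
  long big = 123;
  int small = convert_index_sketch<int>(big);   // fine: 123 fits in int
  return small == 123 ? 0 : 1;
}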
@ -66,7 +66,7 @@ template<typename _Scalar, int _Dim, int _Mode, int _Options>
struct traits<Transform<_Scalar,_Dim,_Mode,_Options> >
{
typedef _Scalar Scalar;
typedef DenseIndex Index;
typedef DenseIndex StorageIndex;
typedef Dense StorageKind;
enum {
Dim1 = _Dim==Dynamic ? _Dim : _Dim + 1,
@ -202,6 +202,7 @@ public:
};
/** the scalar type of the coefficients */
typedef _Scalar Scalar;
typedef DenseIndex StorageIndex;
typedef DenseIndex Index;
/** type of the matrix used to represent the transformation */
typedef typename internal::make_proper_matrix_type<Scalar,Rows,HDim,Options>::type MatrixType;
@ -60,7 +60,7 @@ template<typename VectorsType, typename CoeffsType, int Side>
struct traits<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
typedef typename VectorsType::Scalar Scalar;
typedef typename VectorsType::Index Index;
typedef typename VectorsType::StorageIndex StorageIndex;
typedef typename VectorsType::StorageKind StorageKind;
enum {
RowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::RowsAtCompileTime
@ -87,7 +87,7 @@ struct hseq_side_dependent_impl
{
typedef Block<const VectorsType, Dynamic, 1> EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;
typedef typename VectorsType::Index Index;
typedef typename VectorsType::StorageIndex StorageIndex;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
Index start = k+1+h.m_shift;
@ -100,7 +100,7 @@ struct hseq_side_dependent_impl<VectorsType, CoeffsType, OnTheRight>
{
typedef Transpose<Block<const VectorsType, 1, Dynamic> > EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType;
typedef typename VectorsType::Index Index;
typedef typename VectorsType::StorageIndex StorageIndex;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
Index start = k+1+h.m_shift;
@ -131,7 +131,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
MaxColsAtCompileTime = internal::traits<HouseholderSequence>::MaxColsAtCompileTime
};
typedef typename internal::traits<HouseholderSequence>::Scalar Scalar;
typedef typename VectorsType::Index Index;
typedef typename VectorsType::StorageIndex StorageIndex;

typedef HouseholderSequence<
typename internal::conditional<NumTraits<Scalar>::IsComplex,
@ -34,9 +34,8 @@ class DiagonalPreconditioner
{
typedef _Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef typename Vector::Index Index;

public:
typedef typename Vector::StorageIndex StorageIndex;
// this typedef is only to export the scalar type and compile-time dimensions to solve_retval
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;

@ -159,7 +159,6 @@ class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;

@ -166,7 +166,6 @@ class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixTy
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
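The Index typedefs removed from BiCGSTAB and ConjugateGradient above are now inherited from IterativeSolverBase; typical usage is unchanged. A minimal sketch with the default diagonal (Jacobi) preconditioner, assuming a small SPD matrix:

#include <Eigen/Sparse>

int main() {
  typedef Eigen::SparseMatrix<double> SpMat;   // StorageIndex defaults to int
  SpMat A(3,3);
  A.insert(0,0) = 4; A.insert(1,1) = 5; A.insert(2,2) = 6;
  A.makeCompressed();
  Eigen::VectorXd b = Eigen::VectorXd::Ones(3);
  Eigen::ConjugateGradient<SpMat> cg(A);       // DiagonalPreconditioner by default
  Eigen::VectorXd x = cg.solve(b);
  return cg.info() == Eigen::Success ? 0 : 1;
}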
@ -32,7 +32,7 @@ Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
using std::swap;
using std::abs;
Index mid;
Index n = row.size(); /* length of the vector */
Index n = convert_index<Index>(row.size()); /* length of the vector */
Index first, last ;

ncut--; /* to fit the zero-based indices */
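QuickSplit permutes row and ind so that the ncut entries of largest magnitude land in front, in expected linear time. A standalone sketch of the same contract via the standard library (quick_split_sketch is a hypothetical helper, and ncut is assumed to be at most the vector length):

#include <algorithm>
#include <cmath>
#include <utility>
#include <vector>

// move the ncut (value,index) pairs of largest |value| to the front
void quick_split_sketch(std::vector<std::pair<double,int> >& row, int ncut) {
  std::nth_element(row.begin(), row.begin() + ncut, row.end(),
    [](const std::pair<double,int>& a, const std::pair<double,int>& b)
    { return std::abs(a.first) > std::abs(b.first); });
}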
@ -105,7 +105,7 @@ class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar> >
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef SparseMatrix<Scalar,RowMajor> FactorType;
typedef SparseMatrix<Scalar,ColMajor> PermutType;
typedef typename FactorType::Index Index;
typedef typename FactorType::StorageIndex StorageIndex;

public:
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
@ -124,9 +124,9 @@ class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar> >
compute(mat);
}

Index rows() const { return m_lu.rows(); }
StorageIndex rows() const { return m_lu.rows(); }

Index cols() const { return m_lu.cols(); }
StorageIndex cols() const { return m_lu.cols(); }

/** \brief Reports whether previous computation was successful.
*
@ -189,8 +189,8 @@ protected:
bool m_analysisIsOk;
bool m_factorizationIsOk;
ComputationInfo m_info;
PermutationMatrix<Dynamic,Dynamic,Index> m_P; // Fill-reducing permutation
PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv; // Inverse permutation
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // Fill-reducing permutation
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // Inverse permutation
};

/**
@ -218,14 +218,14 @@ template<typename _MatrixType>
void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
{
// Compute the Fill-reducing permutation
SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
// Symmetrize the pattern
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
// on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered...
SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;
AtA.prune(keep_diag());
internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P); // Then compute the AMD ordering...
internal::minimum_degree_ordering<Scalar, StorageIndex>(AtA, m_P); // Then compute the AMD ordering...

m_Pinv = m_P.inverse(); // ... and the inverse permutation

@ -241,7 +241,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
using std::abs;

eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
Index n = amat.cols(); // Size of the matrix
StorageIndex n = amat.cols(); // Size of the matrix
m_lu.resize(n,n);
// Declare Working vectors and variables
Vector u(n) ; // real values of the row -- maximum size is n --
@ -250,7 +250,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)

// Apply the fill-reducing permutation
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
SparseMatrix<Scalar,RowMajor, Index> mat;
SparseMatrix<Scalar,RowMajor, StorageIndex> mat;
mat = amat.twistedBy(m_Pinv);

// Initialization
@ -259,21 +259,21 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
u.fill(0);

// number of largest elements to keep in each row:
Index fill_in = static_cast<Index> (amat.nonZeros()*m_fillfactor)/n+1;
StorageIndex fill_in = static_cast<StorageIndex> (amat.nonZeros()*m_fillfactor)/n+1;
if (fill_in > n) fill_in = n;

// number of largest nonzero elements to keep in the L and the U part of the current row:
Index nnzL = fill_in/2;
Index nnzU = nnzL;
StorageIndex nnzL = fill_in/2;
StorageIndex nnzU = nnzL;
m_lu.reserve(n * (nnzL + nnzU + 1));

// global loop over the rows of the sparse matrix
for (Index ii = 0; ii < n; ii++)
for (StorageIndex ii = 0; ii < n; ii++)
{
// 1 - copy the lower and the upper part of the row i of mat in the working vector u

Index sizeu = 1; // number of nonzero elements in the upper part of the current row
Index sizel = 0; // number of nonzero elements in the lower part of the current row
StorageIndex sizeu = 1; // number of nonzero elements in the upper part of the current row
StorageIndex sizel = 0; // number of nonzero elements in the lower part of the current row
ju(ii) = ii;
u(ii) = 0;
jr(ii) = ii;
@ -282,7 +282,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
for (; j_it; ++j_it)
{
Index k = j_it.index();
StorageIndex k = j_it.index();
if (k < ii)
{
// copy the lower part
@ -298,7 +298,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
else
{
// copy the upper part
Index jpos = ii + sizeu;
StorageIndex jpos = ii + sizeu;
ju(jpos) = k;
u(jpos) = j_it.value();
jr(k) = jpos;
@ -317,19 +317,19 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
rownorm = sqrt(rownorm);

// 3 - eliminate the previous nonzero rows
Index jj = 0;
Index len = 0;
StorageIndex jj = 0;
StorageIndex len = 0;
while (jj < sizel)
{
// In order to eliminate in the correct order,
// we must select first the smallest column index among ju(jj:sizel)
Index k;
Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
StorageIndex k;
StorageIndex minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
k += jj;
if (minrow != ju(jj))
{
// swap the two locations
Index j = ju(jj);
StorageIndex j = ju(jj);
swap(ju(jj), ju(k));
jr(minrow) = jj; jr(j) = k;
swap(u(jj), u(k));
@ -355,11 +355,11 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
for (; ki_it; ++ki_it)
{
Scalar prod = fact * ki_it.value();
Index j = ki_it.index();
Index jpos = jr(j);
StorageIndex j = ki_it.index();
StorageIndex jpos = jr(j);
if (jpos == -1) // fill-in element
{
Index newpos;
StorageIndex newpos;
if (j >= ii) // dealing with the upper part
{
newpos = ii + sizeu;
@ -388,7 +388,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
} // end of the elimination on the row ii

// reset the upper part of the pointer jr to zero
for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
for(StorageIndex k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;

// 4 - partially sort and insert the elements in the m_lu matrix

@ -401,7 +401,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)

// store the largest m_fill elements of the L part
m_lu.startVec(ii);
for(Index k = 0; k < len; k++)
for(StorageIndex k = 0; k < len; k++)
m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);

// store the diagonal element
@ -413,7 +413,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
// sort the U-part of the row
// apply the dropping rule first
len = 0;
for(Index k = 1; k < sizeu; k++)
for(StorageIndex k = 1; k < sizeu; k++)
{
if(abs(u(ii+k)) > m_droptol * rownorm )
{
@ -429,7 +429,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
internal::QuickSplit(uu, juu, len);

// store the largest elements of the U part
for(Index k = ii + 1; k < ii + len; k++)
for(StorageIndex k = ii + 1; k < ii + len; k++)
m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
}
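The factorization above applies ILUT's dual dropping rule: entries below m_droptol times the row norm are discarded, then only the largest survivors per row are kept. A standalone sketch of that rule on one row (drop_row_sketch is a hypothetical helper, standard library only):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<double> drop_row_sketch(std::vector<double> u, double droptol, std::size_t fill) {
  double rownorm = 0;
  for (std::size_t i = 0; i < u.size(); ++i) rownorm += u[i]*u[i];
  rownorm = std::sqrt(rownorm);
  std::vector<double> kept;
  for (std::size_t i = 0; i < u.size(); ++i)
    if (std::abs(u[i]) > droptol * rownorm) kept.push_back(u[i]);   // rule 1: threshold
  if (kept.size() > fill) {                                          // rule 2: keep largest
    std::nth_element(kept.begin(), kept.begin() + fill, kept.end(),
      [](double a, double b){ return std::abs(a) > std::abs(b); });
    kept.resize(fill);
  }
  return kept;
}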
@ -28,7 +28,7 @@ public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::RealScalar RealScalar;

public:
@ -115,9 +115,9 @@ public:
}

/** \internal */
Index rows() const { return mp_matrix ? mp_matrix->rows() : 0; }
StorageIndex rows() const { return mp_matrix ? mp_matrix->rows() : 0; }
/** \internal */
Index cols() const { return mp_matrix ? mp_matrix->cols() : 0; }
StorageIndex cols() const { return mp_matrix ? mp_matrix->cols() : 0; }

/** \returns the tolerance threshold used by the stopping criteria */
RealScalar tolerance() const { return m_tolerance; }
@ -67,6 +67,7 @@ template<typename _MatrixType> class FullPivLU
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename internal::traits<MatrixType>::StorageKind StorageKind;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;

@ -73,6 +73,7 @@ template<typename _MatrixType> class PartialPivLU
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename internal::traits<MatrixType>::StorageKind StorageKind;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
typedef typename MatrixType::PlainObject PlainObject;
@ -42,7 +42,7 @@ template<typename T0, typename T1> inline void amd_mark(const T0* w, const T1& j

/* clear w */
template<typename Index>
static int cs_wclear (Index mark, Index lemax, Index *w, Index n)
static Index cs_wclear (Index mark, Index lemax, Index *w, Index n)
{
Index k;
if(mark < 2 || (mark + lemax < 0))
@ -59,7 +59,7 @@ static int cs_wclear (Index mark, Index lemax, Index *w, Index n)
template<typename Index>
Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack)
{
int i, p, top = 0;
Index i, p, top = 0;
if(!head || !next || !post || !stack) return (-1); /* check inputs */
stack[0] = j; /* place j on the stack */
while (top >= 0) /* while (stack is not empty) */
@ -92,11 +92,12 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
{
using std::sqrt;

int d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t;
unsigned int h;
Index d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t;

std::size_t h;

Index n = C.cols();
dense = std::max<Index> (16, Index(10 * sqrt(double(n)))); /* find dense threshold */
dense = std::min<Index> (n-2, dense);
@ -330,7 +331,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
h %= n; /* finalize hash of i */
next[i] = hhead[h]; /* place i in hash bucket */
hhead[h] = i;
last[i] = h; /* save hash of i in last[i] */
last[i] = Index(h); /* save hash of i in last[i] */
}
} /* scan2 is done */
degree[k] = dk; /* finalize |Lk| */
@ -139,6 +139,7 @@ class PastixBase : public SparseSolverBase<Derived>
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef SparseMatrix<Scalar, ColMajor> ColSpMatrix;

@ -110,7 +110,7 @@ class PardisoImpl : public SparseSolveBase<PardisoImpl<Derived>
typedef typename Traits::MatrixType MatrixType;
typedef typename Traits::Scalar Scalar;
typedef typename Traits::RealScalar RealScalar;
typedef typename Traits::Index Index;
typedef typename Traits::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,RowMajor,Index> SparseMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
@ -58,6 +58,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
@ -69,7 +70,7 @@ template<typename _MatrixType> class ColPivHouseholderQR

private:

typedef typename PermutationType::Index PermIndexType;
typedef typename PermutationType::StorageIndex PermIndexType;

public:

@ -67,6 +67,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<Index, 1,

@ -54,6 +54,7 @@ template<typename _MatrixType> class HouseholderQR
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
@ -63,9 +63,9 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
public:
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef UF_long Index ;
typedef SparseMatrix<Scalar, ColMajor, Index> MatrixType;
typedef Map<PermutationMatrix<Dynamic, Dynamic, Index> > PermutationType;
typedef UF_long StorageIndex ;
typedef SparseMatrix<Scalar, ColMajor, StorageIndex> MatrixType;
typedef Map<PermutationMatrix<Dynamic, Dynamic, StorageIndex> > PermutationType;
public:
SPQR()
: m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon())
@ -150,7 +150,7 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
{
eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
if(!m_isRUpToDate) {
m_R = viewAsEigen<Scalar,ColMajor, typename MatrixType::Index>(*m_cR);
m_R = viewAsEigen<Scalar,ColMajor, typename MatrixType::StorageIndex>(*m_cR);
m_isRUpToDate = true;
}
return m_R;
@ -204,11 +204,11 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
RealScalar m_tolerance; // treat columns with 2-norm below this tolerance as zero
mutable cholmod_sparse *m_cR; // The sparse R factor in cholmod format
mutable MatrixType m_R; // The sparse matrix R in Eigen format
mutable Index *m_E; // The permutation applied to columns
mutable StorageIndex *m_E; // The permutation applied to columns
mutable cholmod_sparse *m_H; //The householder vectors
mutable Index *m_HPinv; // The row permutation of H
mutable StorageIndex *m_HPinv; // The row permutation of H
mutable cholmod_dense *m_HTau; // The Householder coefficients
mutable Index m_rank; // The rank of the matrix
mutable StorageIndex m_rank; // The rank of the matrix
mutable cholmod_common m_cc; // Workspace and parameters
template<typename ,typename > friend struct SPQR_QProduct;
};
@ -217,12 +217,12 @@ template <typename SPQRType, typename Derived>
struct SPQR_QProduct : ReturnByValue<SPQR_QProduct<SPQRType,Derived> >
{
typedef typename SPQRType::Scalar Scalar;
typedef typename SPQRType::Index Index;
typedef typename SPQRType::StorageIndex StorageIndex;
//Define the constructor to get reference to argument types
SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {}

inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); }
inline Index cols() const { return m_other.cols(); }
inline StorageIndex rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); }
inline StorageIndex cols() const { return m_other.cols(); }
// Assign to a vector
template<typename ResType>
void evalTo(ResType& res) const
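With SPQR's index type renamed, typical use is unchanged; a minimal sketch (assumes SuiteSparseQR is installed and linked, and the SPQRSupport module is enabled):

#include <Eigen/SPQRSupport>

void least_squares_sketch(const Eigen::SparseMatrix<double>& A,
                          const Eigen::VectorXd& b, Eigen::VectorXd& x) {
  Eigen::SPQR<Eigen::SparseMatrix<double> > qr(A);   // rank-revealing sparse QR
  x = qr.solve(b);                                   // least-squares solution
}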
@ -53,6 +53,7 @@ public:
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
@ -44,8 +44,8 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
enum { UpLo = internal::traits<Derived>::UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;

public:
@ -70,8 +70,8 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }

inline Index cols() const { return m_matrix.cols(); }
inline Index rows() const { return m_matrix.rows(); }
inline StorageIndex cols() const { return m_matrix.cols(); }
inline StorageIndex rows() const { return m_matrix.rows(); }

/** \brief Reports whether previous computation was successful.
*
@ -216,16 +216,16 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
VectorType m_diag; // the diagonal coefficients (LDLT mode)
VectorXi m_parent; // elimination tree
VectorXi m_nonZerosPerCol;
PermutationMatrix<Dynamic,Dynamic,Index> m_P; // the permutation
PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv; // the inverse permutation
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // the permutation
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // the inverse permutation

RealScalar m_shiftOffset;
RealScalar m_shiftScale;
};

template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLLT;
template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLDLT;
template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialCholesky;
template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialLLT;
template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialLDLT;
template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialCholesky;

namespace internal {

@ -235,8 +235,8 @@ template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<Simp
typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef SparseMatrix<Scalar, ColMajor, Index> CholMatrixType;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType;
typedef TriangularView<const CholMatrixType, Eigen::Lower> MatrixL;
typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::Upper> MatrixU;
static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
@ -249,8 +249,8 @@ template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<Simpl
typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef SparseMatrix<Scalar, ColMajor, Index> CholMatrixType;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType;
typedef TriangularView<const CholMatrixType, Eigen::UnitLower> MatrixL;
typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU;
static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
@ -293,7 +293,7 @@ public:
typedef SimplicialCholeskyBase<SimplicialLLT> Base;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef internal::traits<SimplicialLLT> Traits;
@ -382,8 +382,8 @@ public:
typedef SimplicialCholeskyBase<SimplicialLDLT> Base;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef internal::traits<SimplicialLDLT> Traits;
typedef typename Traits::MatrixL MatrixL;
@ -464,8 +464,8 @@ public:
typedef SimplicialCholeskyBase<SimplicialCholesky> Base;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef internal::traits<SimplicialCholesky> Traits;
typedef internal::traits<SimplicialLDLT<MatrixType,UpLo> > LDLTTraits;
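The forward declarations above show that the default AMD ordering is now keyed on the matrix's StorageIndex; usage stays the same. A minimal sketch:

#include <Eigen/Sparse>

int main() {
  typedef Eigen::SparseMatrix<double> SpMat;   // StorageIndex defaults to int
  SpMat A(2,2);
  A.insert(0,0) = 2; A.insert(1,1) = 3;        // a trivially SPD matrix
  A.makeCompressed();
  Eigen::SimplicialLDLT<SpMat> ldlt(A);        // ordering: AMDOrdering<int>
  Eigen::VectorXd x = ldlt.solve(Eigen::VectorXd::Ones(2));
  return ldlt.info() == Eigen::Success ? 0 : 1;
}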
@ -57,7 +57,7 @@ void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrix

ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);

for(Index k = 0; k < size; ++k)
for(StorageIndex k = 0; k < size; ++k)
{
/* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
m_parent[k] = -1; /* parent of k is not yet known */
@ -82,7 +82,7 @@ void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrix
}

/* construct Lp index array from m_nonZerosPerCol column counts */
Index* Lp = m_matrix.outerIndexPtr();
StorageIndex* Lp = m_matrix.outerIndexPtr();
Lp[0] = 0;
for(Index k = 0; k < size; ++k)
Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
@ -104,35 +104,35 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&

eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
eigen_assert(ap.rows()==ap.cols());
const Index size = ap.rows();
const StorageIndex size = ap.rows();
eigen_assert(m_parent.size()==size);
eigen_assert(m_nonZerosPerCol.size()==size);

const Index* Lp = m_matrix.outerIndexPtr();
Index* Li = m_matrix.innerIndexPtr();
const StorageIndex* Lp = m_matrix.outerIndexPtr();
StorageIndex* Li = m_matrix.innerIndexPtr();
Scalar* Lx = m_matrix.valuePtr();

ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0);
ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
ei_declare_aligned_stack_constructed_variable(StorageIndex, pattern, size, 0);
ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0);

bool ok = true;
m_diag.resize(DoLDLT ? size : 0);

for(Index k = 0; k < size; ++k)
for(StorageIndex k = 0; k < size; ++k)
{
// compute nonzero pattern of kth row of L, in topological order
y[k] = 0.0; // Y(0:k) is now all zero
Index top = size; // stack for pattern is empty
StorageIndex top = size; // stack for pattern is empty
tags[k] = k; // mark node k as visited
m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
for(typename MatrixType::InnerIterator it(ap,k); it; ++it)
{
Index i = it.index();
StorageIndex i = it.index();
if(i <= k)
{
y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */
Index len;
StorageIndex len;
for(len = 0; tags[i] != k; i = m_parent[i])
{
pattern[len++] = i; /* L(k,i) is nonzero */
@ -149,7 +149,7 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
y[k] = 0.0;
for(; top < size; ++top)
{
Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
StorageIndex i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
Scalar yi = y[i]; /* get and clear Y(i) */
y[i] = 0.0;

@ -160,8 +160,8 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
else
yi = l_ki = yi / Lx[Lp[i]];

Index p2 = Lp[i] + m_nonZerosPerCol[i];
Index p;
StorageIndex p2 = Lp[i] + m_nonZerosPerCol[i];
StorageIndex p;
for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
y[Li[p]] -= numext::conj(Lx[p]) * yi;
d -= numext::real(l_ki * numext::conj(yi));
@ -180,7 +180,7 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
}
else
{
Index p = Lp[k] + m_nonZerosPerCol[k]++;
StorageIndex p = Lp[k] + m_nonZerosPerCol[k]++;
Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */
if(d <= RealScalar(0)) {
ok = false; /* failure, matrix is not positive definite */
@ -19,12 +19,12 @@ namespace internal {
*
* See BasicSparseLLT and SparseProduct for usage examples.
*/
template<typename _Scalar, typename _Index>
template<typename _Scalar, typename _StorageIndex>
class AmbiVector
{
public:
typedef _Scalar Scalar;
typedef _Index Index;
typedef _StorageIndex StorageIndex;
typedef typename NumTraits<Scalar>::Real RealScalar;

explicit AmbiVector(Index size)
@ -36,10 +36,10 @@ class AmbiVector
void init(double estimatedDensity);
void init(int mode);

Index nonZeros() const;
StorageIndex nonZeros() const;

/** Specifies a sub-vector to work on */
void setBounds(Index start, Index end) { m_start = start; m_end = end; }
void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }

void setZero();

@ -55,12 +55,16 @@ class AmbiVector
{
if (m_allocatedSize < size)
reallocate(size);
m_size = size;
m_size = convert_index(size);
}

Index size() const { return m_size; }
StorageIndex size() const { return m_size; }

protected:
StorageIndex convert_index(Index idx)
{
return internal::convert_index<StorageIndex>(idx);
}

void reallocate(Index size)
{
@ -70,15 +74,15 @@ class AmbiVector
if (size<1000)
{
Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
m_buffer = new Scalar[allocSize];
}
else
{
m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
m_buffer = new Scalar[size];
}
m_size = size;
m_size = convert_index(size);
m_start = 0;
m_end = m_size;
}
@ -86,7 +90,7 @@ class AmbiVector
void reallocateSparse()
{
Index copyElements = m_allocatedElements;
m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);
Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
Scalar* newBuffer = new Scalar[allocSize];
@ -99,30 +103,30 @@ class AmbiVector
// element type of the linked list
struct ListEl
{
Index next;
Index index;
StorageIndex next;
StorageIndex index;
Scalar value;
};

// used to store data in both mode
Scalar* m_buffer;
Scalar m_zero;
Index m_size;
Index m_start;
Index m_end;
Index m_allocatedSize;
Index m_allocatedElements;
Index m_mode;
StorageIndex m_size;
StorageIndex m_start;
StorageIndex m_end;
StorageIndex m_allocatedSize;
StorageIndex m_allocatedElements;
StorageIndex m_mode;

// linked list mode
Index m_llStart;
Index m_llCurrent;
Index m_llSize;
StorageIndex m_llStart;
StorageIndex m_llCurrent;
StorageIndex m_llSize;
};

/** \returns the number of non zeros in the current sub vector */
template<typename _Scalar,typename _Index>
_Index AmbiVector<_Scalar,_Index>::nonZeros() const
template<typename _Scalar,typename _StorageIndex>
_StorageIndex AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
{
if (m_mode==IsSparse)
return m_llSize;
@ -130,8 +134,8 @@ _Index AmbiVector<_Scalar,_Index>::nonZeros() const
return m_end - m_start;
}

template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
{
if (estimatedDensity>0.1)
init(IsDense);
@ -139,8 +143,8 @@ void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
init(IsSparse);
}

template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::init(int mode)
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
m_mode = mode;
if (m_mode==IsSparse)
@ -155,15 +159,15 @@ void AmbiVector<_Scalar,_Index>::init(int mode)
*
* Don't worry, this function is extremely cheap.
*/
template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::restart()
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::restart()
{
m_llCurrent = m_llStart;
}

/** Set all coefficients of current subvector to zero */
template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::setZero()
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::setZero()
{
if (m_mode==IsDense)
{
@ -178,8 +182,8 @@ void AmbiVector<_Scalar,_Index>::setZero()
}
}

template<typename _Scalar,typename _Index>
_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
@ -195,7 +199,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
m_llCurrent = 0;
++m_llSize;
llElements[0].value = Scalar(0);
llElements[0].index = i;
llElements[0].index = convert_index(i);
llElements[0].next = -1;
return llElements[0].value;
}
@ -204,7 +208,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
// this is going to be the new first element of the list
ListEl& el = llElements[m_llSize];
el.value = Scalar(0);
el.index = i;
el.index = convert_index(i);
el.next = m_llStart;
m_llStart = m_llSize;
++m_llSize;
@ -213,7 +217,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
}
else
{
Index nextel = llElements[m_llCurrent].next;
StorageIndex nextel = llElements[m_llCurrent].next;
eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
while (nextel >= 0 && llElements[nextel].index<=i)
{
@ -237,7 +241,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
// let's insert a new coefficient
ListEl& el = llElements[m_llSize];
el.value = Scalar(0);
el.index = i;
el.index = convert_index(i);
el.next = llElements[m_llCurrent].next;
llElements[m_llCurrent].next = m_llSize;
++m_llSize;
@ -247,8 +251,8 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
}
}

template<typename _Scalar,typename _Index>
_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
@ -275,8 +279,8 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
}

/** Iterator over the nonzero coefficients */
template<typename _Scalar,typename _Index>
class AmbiVector<_Scalar,_Index>::Iterator
template<typename _Scalar,typename _StorageIndex>
class AmbiVector<_Scalar,_StorageIndex>::Iterator
{
public:
typedef _Scalar Scalar;
@ -320,7 +324,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
}
}

Index index() const { return m_cachedIndex; }
StorageIndex index() const { return m_cachedIndex; }
Scalar value() const { return m_cachedValue; }

operator bool() const { return m_cachedIndex>=0; }
@ -359,9 +363,9 @@ class AmbiVector<_Scalar,_Index>::Iterator

protected:
const AmbiVector& m_vector; // the target vector
Index m_currentEl; // the current element in sparse/linked-list mode
StorageIndex m_currentEl; // the current element in sparse/linked-list mode
RealScalar m_epsilon; // epsilon used to prune zero coefficients
Index m_cachedIndex; // current coordinate
StorageIndex m_cachedIndex; // current coordinate
Scalar m_cachedValue; // current value
bool m_isDense; // mode of the vector
};
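AmbiVector switches between a dense buffer and a linked list of (index,value) nodes depending on the estimated density (the 10% cut-off in init above). A miniature sketch of that switch, with an ordered map standing in for the hand-rolled linked list (AmbiSketch is a hypothetical name):

#include <map>
#include <vector>

struct AmbiSketch {
  bool dense;
  std::vector<double> buf;     // dense mode: direct indexing
  std::map<int,double> list;   // sparse mode: ordered (index,value) pairs
  AmbiSketch(int n, double estimatedDensity)
    : dense(estimatedDensity > 0.1), buf(dense ? n : 0) {}
  double& coeffRef(int i) { return dense ? buf[i] : list[i]; }
};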
@ -18,13 +18,13 @@ namespace internal {
* Stores a sparse set of values as a list of values and a list of indices.
*
*/
template<typename _Scalar,typename _Index>
template<typename _Scalar,typename _StorageIndex>
class CompressedStorage
{
public:

typedef _Scalar Scalar;
typedef _Index Index;
typedef _StorageIndex StorageIndex;

protected:

@ -92,10 +92,10 @@ class CompressedStorage

void append(const Scalar& v, Index i)
{
Index id = static_cast<Index>(m_size);
Index id = m_size;
resize(m_size+1, 1);
m_values[id] = v;
m_indices[id] = i;
m_indices[id] = internal::convert_index<StorageIndex>(i);
}

inline size_t size() const { return m_size; }
@ -105,17 +105,17 @@ class CompressedStorage
inline Scalar& value(size_t i) { return m_values[i]; }
inline const Scalar& value(size_t i) const { return m_values[i]; }

inline Index& index(size_t i) { return m_indices[i]; }
inline const Index& index(size_t i) const { return m_indices[i]; }
inline StorageIndex& index(size_t i) { return m_indices[i]; }
inline const StorageIndex& index(size_t i) const { return m_indices[i]; }

/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
inline Index searchLowerIndex(Index key) const
inline StorageIndex searchLowerIndex(Index key) const
{
return searchLowerIndex(0, m_size, key);
}

/** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
inline Index searchLowerIndex(size_t start, size_t end, Index key) const
inline StorageIndex searchLowerIndex(size_t start, size_t end, Index key) const
{
while(end>start)
{
@ -125,7 +125,7 @@ class CompressedStorage
else
end = mid;
}
return static_cast<Index>(start);
return static_cast<StorageIndex>(start);
}
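searchLowerIndex is a textbook lower-bound binary search over the sorted index array, now returning StorageIndex. An equivalent standalone sketch (search_lower_index_sketch is a hypothetical helper):

#include <cstddef>

// first position in [start,end) whose index is >= key
std::size_t search_lower_index_sketch(const int* indices, std::size_t start,
                                      std::size_t end, int key) {
  while (end > start) {
    std::size_t mid = (start + end) / 2;
    if (indices[mid] < key) start = mid + 1;   // key lies to the right of mid
    else                    end = mid;         // key is at mid or to its left
  }
  return start;
}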
/** \returns the stored value at index \a key
@ -167,7 +167,7 @@ class CompressedStorage
{
m_allocatedSize = 2*(m_size+1);
internal::scoped_array<Scalar> newValues(m_allocatedSize);
internal::scoped_array<Index> newIndices(m_allocatedSize);
internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);

// copy first chunk
internal::smart_copy(m_values, m_values +id, newValues.ptr());
@ -188,7 +188,7 @@ class CompressedStorage
internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
}
m_size++;
m_indices[id] = key;
m_indices[id] = convert_index<StorageIndex>(key);
m_values[id] = defaultValue;
}
return m_values[id];
@ -216,7 +216,7 @@ class CompressedStorage
{
eigen_internal_assert(size!=m_allocatedSize);
internal::scoped_array<Scalar> newValues(size);
internal::scoped_array<Index> newIndices(size);
internal::scoped_array<StorageIndex> newIndices(size);
size_t copySize = (std::min)(size, m_size);
internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
@ -227,7 +227,7 @@ class CompressedStorage

protected:
Scalar* m_values;
Index* m_indices;
StorageIndex* m_indices;
size_t m_size;
size_t m_allocatedSize;
@ -18,7 +18,6 @@ template<typename Lhs, typename Rhs, typename ResultType>
static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
typedef typename remove_all<Lhs>::type::Scalar Scalar;
typedef typename remove_all<Lhs>::type::Index Index;

// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
@ -137,8 +136,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C

static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrixAux;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime>::type ColMajorMatrix;

// If the result is tall and thin (in the extreme case a column vector)
@ -167,7 +166,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix rhsRow = rhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
@ -180,7 +179,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix lhsRow = lhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
@ -193,7 +192,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
res = resRow;
@ -208,7 +207,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C

static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
res = resCol;
@ -220,7 +219,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix lhsCol = lhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
@ -233,7 +232,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
|
||||
{
|
||||
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
|
||||
{
|
||||
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
|
||||
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
|
||||
ColMajorMatrix rhsCol = rhs;
|
||||
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
|
||||
internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
|
||||
@ -246,8 +245,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
|
||||
{
|
||||
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
|
||||
{
|
||||
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
|
||||
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
|
||||
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
|
||||
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
|
||||
RowMajorMatrix resRow(lhs.rows(),rhs.cols());
|
||||
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
|
||||
// sort the non zeros:
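Each specialization above follows the same recipe: materialize the operands in the storage order the kernel expects, run conservative_sparse_sparse_product_impl, then convert the result back. From user code the dispatch is invisible; a hedged usage sketch:

#include <Eigen/Sparse>

// Sketch: multiplying two column-major sparse matrices. Eigen routes this
// through conservative_sparse_sparse_product_selector, creating the row-/
// column-major temporaries described by the specializations above.
Eigen::SparseMatrix<double> multiply(const Eigen::SparseMatrix<double>& A,
                                     const Eigen::SparseMatrix<double>& B)
{
  Eigen::SparseMatrix<double> C = A * B; // temporaries and sorting handled internally
  return C;
}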
|
@ -22,14 +22,14 @@ namespace Eigen {
*
*/
namespace internal {
template<typename _Scalar, int _Flags, typename _Index>
struct traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : traits<SparseMatrix<_Scalar, _Flags, _Index> >
template<typename _Scalar, int _Flags, typename _StorageIndex>
struct traits<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> > : traits<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{};
}
|
template<typename _Scalar, int _Flags, typename _Index>
template<typename _Scalar, int _Flags, typename _StorageIndex>
class MappedSparseMatrix
: public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
: public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> >
{
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
@ -37,19 +37,19 @@ class MappedSparseMatrix
|
protected:
|
Index m_outerSize;
Index m_innerSize;
Index m_nnz;
Index* m_outerIndex;
Index* m_innerIndices;
StorageIndex m_outerSize;
StorageIndex m_innerSize;
StorageIndex m_nnz;
StorageIndex* m_outerIndex;
StorageIndex* m_innerIndices;
Scalar* m_values;
|
public:
|
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
inline Index innerSize() const { return m_innerSize; }
inline Index outerSize() const { return m_outerSize; }
inline StorageIndex rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
inline StorageIndex cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
inline StorageIndex innerSize() const { return m_innerSize; }
inline StorageIndex outerSize() const { return m_outerSize; }
|
bool isCompressed() const { return true; }
|
@ -58,11 +58,11 @@ class MappedSparseMatrix
inline const Scalar* valuePtr() const { return m_values; }
inline Scalar* valuePtr() { return m_values; }
|
inline const Index* innerIndexPtr() const { return m_innerIndices; }
inline Index* innerIndexPtr() { return m_innerIndices; }
inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; }
inline StorageIndex* innerIndexPtr() { return m_innerIndices; }
|
inline const Index* outerIndexPtr() const { return m_outerIndex; }
inline Index* outerIndexPtr() { return m_outerIndex; }
inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
//----------------------------------------
|
inline Scalar coeff(Index row, Index col) const
@ -79,7 +79,7 @@ class MappedSparseMatrix
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
|
const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
const StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
const Index id = r-&m_innerIndices[0];
return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
}
@ -93,7 +93,7 @@ class MappedSparseMatrix
Index end = m_outerIndex[outer+1];
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
const Index id = r-&m_innerIndices[0];
eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
return m_values[id];
@ -103,24 +103,24 @@ class MappedSparseMatrix
class ReverseInnerIterator;
|
/** \returns the number of non zero coefficients */
inline Index nonZeros() const { return m_nnz; }
inline StorageIndex nonZeros() const { return m_nnz; }
|
inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
: m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
m_innerIndices(innerIndexPtr), m_values(valuePtr)
inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr)
: m_outerSize(convert_index(IsRowMajor?rows:cols)), m_innerSize(convert_index(IsRowMajor?cols:rows)), m_nnz(convert_index(nnz)),
m_outerIndex(outerIndexPtr), m_innerIndices(innerIndexPtr), m_values(valuePtr)
{}
|
/** Empty destructor */
inline ~MappedSparseMatrix() {}
};
|
template<typename Scalar, int _Flags, typename _Index>
class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
template<typename Scalar, int _Flags, typename _StorageIndex>
class MappedSparseMatrix<Scalar,_Flags,_StorageIndex>::InnerIterator
{
public:
InnerIterator(const MappedSparseMatrix& mat, Index outer)
: m_matrix(mat),
m_outer(outer),
m_outer(convert_index(outer)),
m_id(mat.outerIndexPtr()[outer]),
m_start(m_id),
m_end(mat.outerIndexPtr()[outer+1])
@ -131,22 +131,22 @@ class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
inline Scalar value() const { return m_matrix.valuePtr()[m_id]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id]); }
|
inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; }
inline Index row() const { return IsRowMajor ? m_outer : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer; }
inline StorageIndex index() const { return m_matrix.innerIndexPtr()[m_id]; }
inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); }
inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; }
|
inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
|
protected:
const MappedSparseMatrix& m_matrix;
const Index m_outer;
Index m_id;
const Index m_start;
const Index m_end;
const StorageIndex m_outer;
StorageIndex m_id;
const StorageIndex m_start;
const StorageIndex m_end;
};
|
template<typename Scalar, int _Flags, typename _Index>
class MappedSparseMatrix<Scalar,_Flags,_Index>::ReverseInnerIterator
template<typename Scalar, int _Flags, typename _StorageIndex>
class MappedSparseMatrix<Scalar,_Flags,_StorageIndex>::ReverseInnerIterator
{
public:
ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer)
@ -162,18 +162,18 @@ class MappedSparseMatrix<Scalar,_Flags,_Index>::ReverseInnerIterator
inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id-1]); }
|
inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; }
inline Index row() const { return IsRowMajor ? m_outer : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer; }
inline StorageIndex index() const { return m_matrix.innerIndexPtr()[m_id-1]; }
inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); }
inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; }
|
inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); }
|
protected:
const MappedSparseMatrix& m_matrix;
const Index m_outer;
Index m_id;
const Index m_start;
const Index m_end;
const StorageIndex m_outer;
StorageIndex m_id;
const StorageIndex m_start;
const StorageIndex m_end;
};
|
namespace internal {
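The constructor change above means the wrapped index buffers must match StorageIndex exactly. A hedged sketch with illustrative array contents (a 3x3 diagonal pattern):

#include <Eigen/Sparse>

// Sketch: wrap pre-existing compressed-column arrays (e.g. from a C library)
// without copying. The index buffers must use the StorageIndex type (int here).
void wrapExternalCcs()
{
  int    outer[4]  = {0, 1, 2, 3};    // column start offsets (cols + 1 entries)
  int    inner[3]  = {0, 1, 2};       // row index of each stored value
  double values[3] = {1.0, 2.0, 3.0}; // the stored coefficients

  Eigen::MappedSparseMatrix<double, Eigen::ColMajor, int>
      map(3, 3, 3, outer, inner, values); // rows, cols, nnz, then the buffers
}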
|
@ -71,7 +71,6 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
{
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
|
typedef typename DstXprType::Index Index;
typedef typename DstXprType::Scalar Scalar;
typedef typename internal::evaluator<DstXprType>::type DstEvaluatorType;
typedef typename internal::evaluator<SrcXprType>::type SrcEvaluatorType;
@ -144,7 +143,6 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Scalar>
static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
typedef typename SrcXprType::Index Index;
|
typename internal::evaluator<SrcXprType>::type srcEval(src);
typename internal::evaluator<DstXprType>::type dstEval(dst);
@ -161,7 +159,6 @@ struct Assignment<DstXprType, SrcXprType, internal::assign_op<typename DstXprTyp
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &)
{
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
typedef typename SrcXprType::Index Index;
|
dst.setZero();
typename internal::evaluator<SrcXprType>::type srcEval(src);
|
@ -27,39 +27,39 @@ public:
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
|
inline BlockImpl(const XprType& xpr, Index i)
: m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
: m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
|
inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
: m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
|
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
EIGEN_STRONG_INLINE StorageIndex rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE StorageIndex cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
|
Index nonZeros() const
StorageIndex nonZeros() const
{
typedef typename internal::evaluator<XprType>::type EvaluatorType;
EvaluatorType matEval(m_matrix);
Index nnz = 0;
StorageIndex nnz = 0;
Index end = m_outerStart + m_outerSize.value();
for(int j=m_outerStart; j<end; ++j)
for(Index j=m_outerStart; j<end; ++j)
for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
++nnz;
return nnz;
}
|
inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
StorageIndex startRow() const { return IsRowMajor ? m_outerStart : 0; }
StorageIndex startCol() const { return IsRowMajor ? 0 : m_outerStart; }
StorageIndex blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
StorageIndex blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
|
protected:
|
typename XprType::Nested m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
StorageIndex m_outerStart;
const internal::variable_if_dynamic<StorageIndex, OuterSize> m_outerSize;
|
public:
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
@ -82,15 +82,16 @@ public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
protected:
typedef typename Base::IndexVector IndexVector;
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:
|
inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index i)
: m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
: m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
|
inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
: m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
|
template<typename OtherDerived>
@ -102,14 +103,14 @@ public:
// and/or it is not at the end of the nonzeros of the underlying matrix.
|
// 1 - eval to a temporary to avoid transposition and/or aliasing issues
SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, Index> tmp(other);
SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> tmp(other);
|
// 2 - let's check whether there is enough allocated memory
Index nnz = tmp.nonZeros();
Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
Index block_size = end - start; // available room in the current block
Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
StorageIndex nnz = tmp.nonZeros();
StorageIndex start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
StorageIndex end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
StorageIndex block_size = end - start; // available room in the current block
StorageIndex tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
|
Index free_size = m_matrix.isCompressed()
? Index(matrix.data().allocatedSize()) + block_size
@ -151,7 +152,7 @@ public:
matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros();
|
// update outer index pointers
Index p = start;
StorageIndex p = start;
for(Index k=0; k<m_outerSize.value(); ++k)
{
matrix.outerIndexPtr()[m_outerStart+k] = p;
@ -176,25 +177,25 @@ public:
inline Scalar* valuePtr()
{ return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
|
inline const Index* innerIndexPtr() const
inline const StorageIndex* innerIndexPtr() const
{ return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
inline Index* innerIndexPtr()
inline StorageIndex* innerIndexPtr()
{ return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
|
inline const Index* outerIndexPtr() const
inline const StorageIndex* outerIndexPtr() const
{ return m_matrix.outerIndexPtr() + m_outerStart; }
inline Index* outerIndexPtr()
inline StorageIndex* outerIndexPtr()
{ return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; }
|
Index nonZeros() const
StorageIndex nonZeros() const
{
if(m_matrix.isCompressed())
return Index( std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
- std::size_t(m_matrix.outerIndexPtr()[m_outerStart]));
return ( (m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
- (m_matrix.outerIndexPtr()[m_outerStart]));
else if(m_outerSize.value()==0)
return 0;
else
return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
return Map<const IndexVector>(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
}
|
const Scalar& lastCoeff() const
@ -207,32 +208,32 @@ public:
return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
}
|
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
EIGEN_STRONG_INLINE StorageIndex rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE StorageIndex cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
|
inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
StorageIndex startRow() const { return IsRowMajor ? m_outerStart : 0; }
StorageIndex startCol() const { return IsRowMajor ? 0 : m_outerStart; }
StorageIndex blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
StorageIndex blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
|
protected:
|
typename SparseMatrixType::Nested m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
StorageIndex m_outerStart;
const internal::variable_if_dynamic<StorageIndex, OuterSize> m_outerSize;
|
};
|
} // namespace internal
|
template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
: public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols>
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
: public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
typedef _Index Index;
typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
typedef _StorageIndex StorageIndex;
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
inline BlockImpl(SparseMatrixType& xpr, Index i)
: Base(xpr, i)
@ -245,13 +246,13 @@ public:
using Base::operator=;
};
|
template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
: public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols>
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
: public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
typedef _Index Index;
typedef const SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
typedef _StorageIndex StorageIndex;
typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
inline BlockImpl(SparseMatrixType& xpr, Index i)
: Base(xpr, i)
@ -333,8 +334,8 @@ public:
*/
inline BlockImpl(const XprType& xpr, Index i)
: m_matrix(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
@ -342,11 +343,11 @@ public:
/** Dynamic-size constructor
*/
inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols)
: m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
{}
|
inline Index rows() const { return m_blockRows.value(); }
inline Index cols() const { return m_blockCols.value(); }
inline StorageIndex rows() const { return m_blockRows.value(); }
inline StorageIndex cols() const { return m_blockCols.value(); }
|
inline Scalar& coeffRef(Index row, Index col)
{
@ -374,10 +375,10 @@ public:
}
|
inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
Index startRow() const { return m_startRow.value(); }
Index startCol() const { return m_startCol.value(); }
Index blockRows() const { return m_blockRows.value(); }
Index blockCols() const { return m_blockCols.value(); }
StorageIndex startRow() const { return m_startRow.value(); }
StorageIndex startCol() const { return m_startCol.value(); }
StorageIndex blockRows() const { return m_blockRows.value(); }
StorageIndex blockCols() const { return m_blockCols.value(); }
|
protected:
friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
@ -386,10 +387,10 @@ public:
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
|
typename XprType::Nested m_matrix;
const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
const internal::variable_if_dynamic<StorageIndex, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<StorageIndex, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
const internal::variable_if_dynamic<StorageIndex, RowsAtCompileTime> m_blockRows;
const internal::variable_if_dynamic<StorageIndex, ColsAtCompileTime> m_blockCols;
|
};
|
@ -402,7 +403,7 @@ namespace internal {
IsRowMajor = BlockType::IsRowMajor
};
typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested;
typedef typename BlockType::Index Index;
typedef typename BlockType::StorageIndex StorageIndex;
typedef typename _MatrixTypeNested::InnerIterator Base;
const BlockType& m_block;
Index m_end;
@ -417,10 +418,10 @@ namespace internal {
Base::operator++();
}
|
inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
inline Index row() const { return Base::row() - m_block.m_startRow.value(); }
inline Index col() const { return Base::col() - m_block.m_startCol.value(); }
inline StorageIndex index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
inline StorageIndex outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
inline StorageIndex row() const { return Base::row() - m_block.m_startRow.value(); }
inline StorageIndex col() const { return Base::col() - m_block.m_startCol.value(); }
|
inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
};
@ -434,13 +435,13 @@ namespace internal {
IsRowMajor = BlockType::IsRowMajor
};
typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested;
typedef typename BlockType::Index Index;
typedef typename BlockType::StorageIndex StorageIndex;
typedef typename BlockType::Scalar Scalar;
const BlockType& m_block;
Index m_outerPos;
Index m_innerIndex;
StorageIndex m_outerPos;
StorageIndex m_innerIndex;
Scalar m_value;
Index m_end;
StorageIndex m_end;
public:
|
explicit EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer = 0)
@ -456,10 +457,10 @@ namespace internal {
++(*this);
}
|
inline Index index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
inline Index outer() const { return 0; }
inline Index row() const { return IsRowMajor ? 0 : index(); }
inline Index col() const { return IsRowMajor ? index() : 0; }
inline StorageIndex index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
inline StorageIndex outer() const { return 0; }
inline StorageIndex row() const { return IsRowMajor ? 0 : index(); }
inline StorageIndex col() const { return IsRowMajor ? index() : 0; }
|
inline Scalar value() const { return m_value; }
|
@ -491,7 +492,7 @@ struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBa
class OuterVectorInnerIterator;
public:
typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename XprType::Scalar Scalar;
|
class ReverseInnerIterator;
@ -538,10 +539,10 @@ public:
EvalIterator::operator++();
}
|
inline Index index() const { return EvalIterator::index() - (IsRowMajor ? m_block.startCol() : m_block.startRow()); }
inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
inline StorageIndex index() const { return EvalIterator::index() - (IsRowMajor ? m_block.startCol() : m_block.startRow()); }
inline StorageIndex outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
inline StorageIndex row() const { return EvalIterator::row() - m_block.startRow(); }
inline StorageIndex col() const { return EvalIterator::col() - m_block.startCol(); }
|
inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
};
@ -550,10 +551,10 @@ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
const unary_evaluator& m_eval;
Index m_outerPos;
Index m_innerIndex;
StorageIndex m_outerPos;
StorageIndex m_innerIndex;
Scalar m_value;
Index m_end;
StorageIndex m_end;
public:
|
EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
@ -568,10 +569,10 @@ public:
++(*this);
}
|
inline Index index() const { return m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow()); }
inline Index outer() const { return 0; }
inline Index row() const { return IsRowMajor ? 0 : index(); }
inline Index col() const { return IsRowMajor ? index() : 0; }
inline StorageIndex index() const { return m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow()); }
inline StorageIndex outer() const { return 0; }
inline StorageIndex row() const { return IsRowMajor ? 0 : index(); }
inline StorageIndex col() const { return IsRowMajor ? index() : 0; }
|
inline Scalar value() const { return m_value; }
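The sparse_matrix_block_impl assignment path above exists so that writing an inner vector of a compressed matrix can reuse already-allocated room, shifting the tail only when the new data needs more space. A hedged usage sketch:

#include <Eigen/Sparse>

// Sketch: overwrite one column (an inner vector) of a column-major sparse
// matrix; this is routed through the block operator= shown above.
void overwriteColumn(Eigen::SparseMatrix<double>& A,
                     const Eigen::SparseVector<double>& v)
{
  A.col(1) = v; // assumes A has at least two columns and v matches A.rows()
}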
|
@ -58,10 +58,10 @@ Index etree_find (Index i, IndexVector& pp)
* \param perm The permutation to apply to the column of \b mat
*/
template <typename MatrixType, typename IndexVector>
int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0)
int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0)
{
typedef typename MatrixType::Index Index;
Index nc = mat.cols(); // Number of columns
typedef typename MatrixType::StorageIndex Index;
Index nc = mat.cols(); // Number of columns
Index m = mat.rows();
Index diagSize = (std::min)(nc,m);
IndexVector root(nc); // root of subtree of etree
@ -70,7 +70,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
pp.setZero(); // Initialize disjoint sets
parent.resize(mat.cols());
//Compute first nonzero column in each row
Index row,col;
Index row,col;
firstRowElt.resize(m);
firstRowElt.setConstant(nc);
firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
@ -89,7 +89,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
except use (firstRowElt[r],c) in place of an edge (r,c) of A.
Thus each row clique in A'*A is replaced by a star
centered at its first vertex, which has the same fill. */
Index rset, cset, rroot;
Index rset, cset, rroot;
for (col = 0; col < nc; col++)
{
found_diag = col>=m;
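For context, etree_find walks a disjoint-set (union-find) structure over pp. A generic path-halving find conveying the same idea (a sketch only; Eigen's root convention differs):

#include <vector>

// Sketch: union-find lookup with path halving. Each visited node is pointed
// at its grandparent, so repeated queries flatten the tree over time.
int findRoot(std::vector<int>& parent, int i)
{
  while(parent[i] != i)
  {
    parent[i] = parent[parent[i]]; // path halving
    i = parent[i];
  }
  return i;
}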
|
@ -56,7 +56,7 @@ public:
class InnerIterator
{
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;
|
public:
|
@ -97,9 +97,9 @@ public:
|
EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
|
EIGEN_STRONG_INLINE Index index() const { return m_id; }
EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE StorageIndex row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
EIGEN_STRONG_INLINE StorageIndex col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
|
EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
|
@ -108,7 +108,7 @@ public:
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
Scalar m_value;
Index m_id;
StorageIndex m_id;
};
|
|
@ -145,7 +145,7 @@ public:
class InnerIterator
{
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;
|
public:
|
@ -177,9 +177,9 @@ public:
|
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
|
EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
EIGEN_STRONG_INLINE StorageIndex row() const { return m_lhsIter.row(); }
EIGEN_STRONG_INLINE StorageIndex col() const { return m_lhsIter.col(); }
|
EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
|
@ -223,7 +223,7 @@ public:
class InnerIterator
{
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;
enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
|
public:
@ -241,9 +241,9 @@ public:
EIGEN_STRONG_INLINE Scalar value() const
{ return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
|
EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); }
EIGEN_STRONG_INLINE StorageIndex row() const { return m_rhsIter.row(); }
EIGEN_STRONG_INLINE StorageIndex col() const { return m_rhsIter.col(); }
|
EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
|
@ -288,7 +288,7 @@ public:
class InnerIterator
{
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;
enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
|
public:
@ -307,9 +307,9 @@ public:
{ return m_functor(m_lhsIter.value(),
m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
|
EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
EIGEN_STRONG_INLINE StorageIndex row() const { return m_lhsIter.row(); }
EIGEN_STRONG_INLINE StorageIndex col() const { return m_lhsIter.col(); }
|
EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
|
@ -317,7 +317,7 @@ public:
LhsIterator m_lhsIter;
const RhsEvaluator &m_rhsEval;
const BinaryOp& m_functor;
const Index m_outer;
const StorageIndex m_outer;
};
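The first InnerIterator above is a classic two-pointer merge of the lhs and rhs nonzeros, feeding implicit zeros to the functor where one side has no entry. The same loop over plain sorted arrays (illustrative sketch, not the patched class):

// Sketch: merge two sorted index/value streams the way the sparse-sparse
// CwiseBinaryOp iterator does, invoking op once per union index.
template<typename Scalar, typename StorageIndex, typename Op>
void mergeSparseStreams(const StorageIndex* ia, const Scalar* va, int na,
                        const StorageIndex* ib, const Scalar* vb, int nb, Op op)
{
  int a = 0, b = 0;
  while(a < na && b < nb)
  {
    if(ia[a] == ib[b])     { op(ia[a], va[a], vb[b]);     ++a; ++b; } // aligned entries
    else if(ia[a] < ib[b]) { op(ia[a], va[a], Scalar(0)); ++a; }      // rhs is implicit zero
    else                   { op(ib[b], Scalar(0), vb[b]); ++b; }      // lhs is implicit zero
  }
  while(a < na) { op(ia[a], va[a], Scalar(0)); ++a; }
  while(b < nb) { op(ib[b], Scalar(0), vb[b]); ++b; }
}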
|
@ -47,7 +47,7 @@ class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterat
typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
|
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer)
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
|
@ -122,7 +122,7 @@ class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerItera
typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
|
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer)
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
|
@ -29,7 +29,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename Lhs::Index Index;
typedef typename Lhs::StorageIndex StorageIndex;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
@ -62,7 +62,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, A
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename Lhs::Index Index;
typedef typename Lhs::StorageIndex StorageIndex;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
@ -86,7 +86,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename Lhs::Index Index;
typedef typename Lhs::StorageIndex StorageIndex;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
@ -106,7 +106,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename Lhs::Index Index;
typedef typename Lhs::StorageIndex StorageIndex;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
@ -193,7 +193,7 @@ protected:
typedef typename evaluator<ActualRhs>::type RhsEval;
typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
typedef typename ProdXprType::Scalar Scalar;
typedef typename ProdXprType::Index Index;
typedef typename ProdXprType::StorageIndex StorageIndex;
|
public:
enum {
@ -211,9 +211,9 @@ public:
m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
{}
|
EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }
EIGEN_STRONG_INLINE StorageIndex outer() const { return m_outer; }
EIGEN_STRONG_INLINE StorageIndex row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
EIGEN_STRONG_INLINE StorageIndex col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }
|
EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }
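All four specializations share one inner kernel: iterate the nonzeros of each lhs inner vector and accumulate into the dense result. The same computation over raw compressed-column arrays (hedged sketch):

// Sketch: y += alpha * A * x for a column-major sparse A in CCS form; this
// mirrors what sparse_time_dense_product_impl does through LhsInnerIterator.
template<typename Scalar, typename StorageIndex>
void spmvCcs(int cols, const StorageIndex* outer, const StorageIndex* inner,
             const Scalar* vals, const Scalar* x, Scalar alpha, Scalar* y)
{
  for(int j = 0; j < cols; ++j)
    for(StorageIndex p = outer[j]; p < outer[j+1]; ++p)
      y[inner[p]] += alpha * vals[p] * x[j]; // scatter column j scaled by x[j]
}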
|
@ -66,7 +66,7 @@ struct sparse_diagonal_product_evaluator<SparseXprType, DiagonalCoeffType, SDP_A
protected:
typedef typename evaluator<SparseXprType>::InnerIterator SparseXprInnerIterator;
typedef typename SparseXprType::Scalar Scalar;
typedef typename SparseXprType::Index Index;
typedef typename SparseXprType::StorageIndex StorageIndex;
|
public:
class InnerIterator : public SparseXprInnerIterator
@ -96,7 +96,7 @@ template<typename SparseXprType, typename DiagCoeffType>
struct sparse_diagonal_product_evaluator<SparseXprType, DiagCoeffType, SDP_AsCwiseProduct>
{
typedef typename SparseXprType::Scalar Scalar;
typedef typename SparseXprType::Index Index;
typedef typename SparseXprType::StorageIndex StorageIndex;
|
typedef CwiseBinaryOp<scalar_product_op<Scalar>,
const typename SparseXprType::ConstInnerVectorReturnType,
@ -111,14 +111,14 @@ struct sparse_diagonal_product_evaluator<SparseXprType, DiagCoeffType, SDP_AsCwi
InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
: m_cwiseEval(xprEval.m_sparseXprNested.innerVector(outer).cwiseProduct(xprEval.m_diagCoeffNested)),
m_cwiseIter(m_cwiseEval, 0),
m_outer(outer)
m_outer(convert_index<StorageIndex>(outer))
{}
|
inline Scalar value() const { return m_cwiseIter.value(); }
inline Index index() const { return m_cwiseIter.index(); }
inline Index outer() const { return m_outer; }
inline Index col() const { return SparseXprType::IsRowMajor ? m_cwiseIter.index() : m_outer; }
inline Index row() const { return SparseXprType::IsRowMajor ? m_outer : m_cwiseIter.index(); }
inline StorageIndex index() const { return convert_index<StorageIndex>(m_cwiseIter.index()); }
inline StorageIndex outer() const { return m_outer; }
inline StorageIndex col() const { return SparseXprType::IsRowMajor ? m_cwiseIter.index() : m_outer; }
inline StorageIndex row() const { return SparseXprType::IsRowMajor ? m_outer : m_cwiseIter.index(); }
|
EIGEN_STRONG_INLINE InnerIterator& operator++()
{ ++m_cwiseIter; return *this; }
@ -127,7 +127,7 @@ struct sparse_diagonal_product_evaluator<SparseXprType, DiagCoeffType, SDP_AsCwi
protected:
CwiseProductEval m_cwiseEval;
CwiseProductIterator m_cwiseIter;
Index m_outer;
StorageIndex m_outer;
};
|
sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagCoeffType &diagCoeff)
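Both product orders reduce to scaling inner vectors, which the evaluator above implements as a cwise product with the diagonal coefficients. A hedged usage sketch:

#include <Eigen/Sparse>

// Sketch: scale the rows of a sparse matrix by a vector; this is evaluated
// through sparse_diagonal_product_evaluator without forming a dense diagonal.
Eigen::SparseMatrix<double> scaleRows(const Eigen::VectorXd& d,
                                      const Eigen::SparseMatrix<double>& S)
{
  return d.asDiagonal() * S; // assumes d.size() == S.rows()
}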
|
@ -43,7 +43,7 @@ template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseMatrix<_Scalar, _Options, _Index> >
{
typedef _Scalar Scalar;
typedef _Index Index;
typedef _Index StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
@ -65,7 +65,7 @@ struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
|
typedef _Scalar Scalar;
typedef Dense StorageKind;
typedef _Index Index;
typedef _Index StorageIndex;
typedef MatrixXpr XprKind;
|
enum {
@ -103,23 +103,24 @@ class SparseMatrix
|
|
using Base::IsRowMajor;
typedef internal::CompressedStorage<Scalar,Index> Storage;
typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum {
Options = _Options
};
|
typedef typename Base::IndexVector IndexVector;
typedef typename Base::ScalarVector ScalarVector;
protected:
|
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
|
Index m_outerSize;
Index m_innerSize;
Index* m_outerIndex;
Index* m_innerNonZeros; // optional, if null then the data is compressed
StorageIndex m_outerSize;
StorageIndex m_innerSize;
StorageIndex* m_outerIndex;
StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
Storage m_data;
|
Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
|
public:
|
@ -127,14 +128,14 @@ class SparseMatrix
inline bool isCompressed() const { return m_innerNonZeros==0; }
|
/** \returns the number of rows of the matrix */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
inline StorageIndex rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
/** \returns the number of columns of the matrix */
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
inline StorageIndex cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
|
/** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */
inline Index innerSize() const { return m_innerSize; }
inline StorageIndex innerSize() const { return m_innerSize; }
/** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */
inline Index outerSize() const { return m_outerSize; }
inline StorageIndex outerSize() const { return m_outerSize; }
|
/** \returns a const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
@ -148,29 +149,29 @@ class SparseMatrix
/** \returns a const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline const Index* innerIndexPtr() const { return &m_data.index(0); }
inline const StorageIndex* innerIndexPtr() const { return &m_data.index(0); }
/** \returns a non-const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline Index* innerIndexPtr() { return &m_data.index(0); }
inline StorageIndex* innerIndexPtr() { return &m_data.index(0); }
|
/** \returns a const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
inline const Index* outerIndexPtr() const { return m_outerIndex; }
inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
/** \returns a non-const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
inline Index* outerIndexPtr() { return m_outerIndex; }
inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
|
/** \returns a const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
/** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
|
/** \internal */
inline Storage& data() { return m_data; }
@ -234,7 +235,7 @@ class SparseMatrix
|
if(isCompressed())
{
reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
reserve(IndexVector::Constant(outerSize(), 2));
}
return insertUncompressed(row,col);
}
@ -248,17 +249,17 @@ class SparseMatrix
inline void setZero()
{
m_data.clear();
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
if(m_innerNonZeros)
memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
}
|
/** \returns the number of non zero coefficients */
inline Index nonZeros() const
inline StorageIndex nonZeros() const
{
if(m_innerNonZeros)
return innerNonZeros().sum();
return static_cast<Index>(m_data.size());
return convert_index(Index(m_data.size()));
}
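nonZeros() above covers the two storage modes: compressed (m_innerNonZeros is null and the count is m_data.size()) and uncompressed (the count is the sum of per-vector sizes). The same computation over raw arrays (sketch):

// Sketch: count stored entries in both SparseMatrix storage modes.
template<typename StorageIndex>
StorageIndex countNonZeros(int outerSize, const StorageIndex* outerIndex,
                           const StorageIndex* innerNonZeros) // null if compressed
{
  if(!innerNonZeros)
    return outerIndex[outerSize]; // compressed: last outer offset is the total
  StorageIndex nnz = 0;
  for(int j = 0; j < outerSize; ++j)
    nnz += innerNonZeros[j];      // uncompressed: sum the per-vector counts
  return nnz;
}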
|
/** Preallocates \a reserveSize non zeros.
@ -302,13 +303,13 @@ class SparseMatrix
{
std::size_t totalReserveSize = 0;
// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
|
// temporarily use m_innerSizes to hold the new starting points.
Index* newOuterIndex = m_innerNonZeros;
StorageIndex* newOuterIndex = m_innerNonZeros;
|
Index count = 0;
StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
@ -316,10 +317,10 @@ class SparseMatrix
totalReserveSize += reserveSizes[j];
}
m_data.reserve(totalReserveSize);
Index previousOuterIndex = m_outerIndex[m_outerSize];
StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
for(Index j=m_outerSize-1; j>=0; --j)
{
Index innerNNZ = previousOuterIndex - m_outerIndex[j];
StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
@ -335,15 +336,15 @@ class SparseMatrix
}
else
{
Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
|
Index count = 0;
StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
count += toReserve + m_innerNonZeros[j];
}
newOuterIndex[m_outerSize] = count;
@ -354,7 +355,7 @@ class SparseMatrix
Index offset = newOuterIndex[j] - m_outerIndex[j];
if(offset>0)
{
Index innerNNZ = m_innerNonZeros[j];
StorageIndex innerNNZ = m_innerNonZeros[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
@ -425,7 +426,7 @@ class SparseMatrix
{
if(isCompressed())
{
Index size = static_cast<Index>(m_data.size());
StorageIndex size = internal::convert_index<StorageIndex>(Index(m_data.size()));
Index i = m_outerSize;
// find the last filled column
while (i>=0 && m_outerIndex[i]==0)
@ -490,7 +491,7 @@ class SparseMatrix
{
if(m_innerNonZeros != 0)
return;
m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
@ -517,7 +518,7 @@ class SparseMatrix
// TODO also implement a unit test
makeCompressed();
|
Index k = 0;
StorageIndex k = 0;
for(Index j=0; j<m_outerSize; ++j)
{
Index previousStart = m_outerIndex[j];
@ -550,13 +551,13 @@ class SparseMatrix
|
Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
Index newInnerSize = IsRowMajor ? cols : rows;
StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
|
// Deals with inner non zeros
if (m_innerNonZeros)
{
// Resize m_innerNonZeros
Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!newInnerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = newInnerNonZeros;
|
@ -566,7 +567,7 @@ class SparseMatrix
else if (innerChange < 0)
{
// Inner size decreased: allocate a new m_innerNonZeros
m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
|
||||
for(Index i = 0; i < m_outerSize; i++)
|
||||
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
|
||||
@ -577,8 +578,8 @@ class SparseMatrix
|
||||
{
|
||||
for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
|
||||
{
|
||||
Index &n = m_innerNonZeros[i];
|
||||
Index start = m_outerIndex[i];
|
||||
StorageIndex &n = m_innerNonZeros[i];
|
||||
StorageIndex start = m_outerIndex[i];
|
||||
while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
|
||||
}
|
||||
}
|
||||
@ -589,12 +590,12 @@ class SparseMatrix
|
||||
if (outerChange == 0)
|
||||
return;
|
||||
|
||||
Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
|
||||
StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
|
||||
if (!newOuterIndex) internal::throw_std_bad_alloc();
|
||||
m_outerIndex = newOuterIndex;
|
||||
if (outerChange > 0)
|
||||
{
|
||||
Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
|
||||
StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
|
||||
for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
|
||||
m_outerIndex[i] = last;
|
||||
}
|
||||
@ -606,13 +607,13 @@ class SparseMatrix
|
||||
*/
|
||||
void resize(Index rows, Index cols)
|
||||
{
|
||||
const Index outerSize = IsRowMajor ? rows : cols;
|
||||
m_innerSize = IsRowMajor ? cols : rows;
|
||||
const StorageIndex outerSize = convert_index(IsRowMajor ? rows : cols);
|
||||
m_innerSize = convert_index(IsRowMajor ? cols : rows);
|
||||
m_data.clear();
|
||||
if (m_outerSize != outerSize || m_outerSize==0)
|
||||
{
|
||||
std::free(m_outerIndex);
|
||||
m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
|
||||
m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
|
||||
if (!m_outerIndex) internal::throw_std_bad_alloc();
|
||||
|
||||
m_outerSize = outerSize;
|
||||
@ -622,7 +623,7 @@ class SparseMatrix
|
||||
std::free(m_innerNonZeros);
|
||||
m_innerNonZeros = 0;
|
||||
}
|
||||
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
|
||||
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
|
||||
}
|
||||
|
||||
/** \internal
|
||||
@ -715,9 +716,9 @@ class SparseMatrix
|
||||
{
|
||||
eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
|
||||
this->m_data.resize(rows());
|
||||
Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
|
||||
Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
|
||||
Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
|
||||
Eigen::Map<IndexVector>(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
|
||||
Eigen::Map<ScalarVector>(&this->m_data.value(0), rows()).setOnes();
|
||||
Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
|
||||
}
|
||||
inline SparseMatrix& operator=(const SparseMatrix& other)
|
||||
{
|
||||
@ -808,9 +809,7 @@ protected:
|
||||
template<typename Other>
|
||||
void initAssignment(const Other& other)
|
||||
{
|
||||
eigen_assert( other.rows() == typename Other::Index(Index(other.rows()))
|
||||
&& other.cols() == typename Other::Index(Index(other.cols())) );
|
||||
resize(Index(other.rows()), Index(other.cols()));
|
||||
resize(other.rows(), other.cols());
|
||||
if(m_innerNonZeros)
|
||||
{
|
||||
std::free(m_innerNonZeros);
|
||||
@ -826,15 +825,15 @@ protected:
|
||||
* A vector object that is equal to 0 everywhere but v at the position i */
|
||||
class SingletonVector
|
||||
{
|
||||
Index m_index;
|
||||
Index m_value;
|
||||
StorageIndex m_index;
|
||||
StorageIndex m_value;
|
||||
public:
|
||||
typedef Index value_type;
|
||||
typedef StorageIndex value_type;
|
||||
SingletonVector(Index i, Index v)
|
||||
: m_index(i), m_value(v)
|
||||
: m_index(convert_index(i)), m_value(convert_index(v))
|
||||
{}
|
||||
|
||||
Index operator[](Index i) const { return i==m_index ? m_value : 0; }
|
||||
StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
|
||||
};
|
||||
|
||||
/** \internal
|
||||
@ -853,14 +852,14 @@ public:
|
||||
eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
|
||||
|
||||
Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
|
||||
m_data.index(p) = inner;
|
||||
m_data.index(p) = convert_index(inner);
|
||||
return (m_data.value(p) = 0);
|
||||
}
|
||||
|
||||
private:
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
|
||||
EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
|
||||
EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
|
||||
}
|
||||
|
||||
@ -880,7 +879,7 @@ class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
|
||||
{
|
||||
public:
|
||||
InnerIterator(const SparseMatrix& mat, Index outer)
|
||||
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
|
||||
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(convert_index(outer)), m_id(mat.m_outerIndex[outer])
|
||||
{
|
||||
if(mat.isCompressed())
|
||||
m_end = mat.m_outerIndex[outer+1];
|
||||
@ -893,19 +892,19 @@ class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
|
||||
inline const Scalar& value() const { return m_values[m_id]; }
|
||||
inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
|
||||
|
||||
inline Index index() const { return m_indices[m_id]; }
|
||||
inline Index outer() const { return m_outer; }
|
||||
inline Index row() const { return IsRowMajor ? m_outer : index(); }
|
||||
inline Index col() const { return IsRowMajor ? index() : m_outer; }
|
||||
inline StorageIndex index() const { return m_indices[m_id]; }
|
||||
inline StorageIndex outer() const { return m_outer; }
|
||||
inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); }
|
||||
inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; }
|
||||
|
||||
inline operator bool() const { return (m_id < m_end); }
|
||||
|
||||
protected:
|
||||
const Scalar* m_values;
|
||||
const Index* m_indices;
|
||||
const Index m_outer;
|
||||
Index m_id;
|
||||
Index m_end;
|
||||
const StorageIndex* m_indices;
|
||||
const StorageIndex m_outer;
|
||||
StorageIndex m_id;
|
||||
StorageIndex m_end;
|
||||
private:
|
||||
// If you get here, then you're not using the right InnerIterator type, e.g.:
|
||||
// SparseMatrix<double,RowMajor> A;
|
||||
@ -931,19 +930,19 @@ class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
|
||||
inline const Scalar& value() const { return m_values[m_id-1]; }
|
||||
inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
|
||||
|
||||
inline Index index() const { return m_indices[m_id-1]; }
|
||||
inline Index outer() const { return m_outer; }
|
||||
inline Index row() const { return IsRowMajor ? m_outer : index(); }
|
||||
inline Index col() const { return IsRowMajor ? index() : m_outer; }
|
||||
inline StorageIndex index() const { return m_indices[m_id-1]; }
|
||||
inline StorageIndex outer() const { return m_outer; }
|
||||
inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); }
|
||||
inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; }
|
||||
|
||||
inline operator bool() const { return (m_id > m_start); }
|
||||
|
||||
protected:
|
||||
const Scalar* m_values;
|
||||
const Index* m_indices;
|
||||
const Index m_outer;
|
||||
Index m_id;
|
||||
const Index m_start;
|
||||
const StorageIndex* m_indices;
|
||||
const StorageIndex m_outer;
|
||||
StorageIndex m_id;
|
||||
const StorageIndex m_start;
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
@ -954,13 +953,13 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
|
||||
EIGEN_UNUSED_VARIABLE(Options);
|
||||
enum { IsRowMajor = SparseMatrixType::IsRowMajor };
|
||||
typedef typename SparseMatrixType::Scalar Scalar;
|
||||
typedef typename SparseMatrixType::Index Index;
|
||||
SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
|
||||
typedef typename SparseMatrixType::StorageIndex StorageIndex;
|
||||
SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
|
||||
|
||||
if(begin!=end)
|
||||
{
|
||||
// pass 1: count the nnz per inner-vector
|
||||
Matrix<Index,Dynamic,1> wi(trMat.outerSize());
|
||||
typename SparseMatrixType::IndexVector wi(trMat.outerSize());
|
||||
wi.setZero();
|
||||
for(InputIterator it(begin); it!=end; ++it)
|
||||
{
|
||||
@ -1034,13 +1033,13 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
|
||||
{
|
||||
eigen_assert(!isCompressed());
|
||||
// TODO, in practice we should be able to use m_innerNonZeros for that task
|
||||
Matrix<Index,Dynamic,1> wi(innerSize());
|
||||
IndexVector wi(innerSize());
|
||||
wi.fill(-1);
|
||||
Index count = 0;
|
||||
StorageIndex count = 0;
|
||||
// for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
|
||||
for(Index j=0; j<outerSize(); ++j)
|
||||
{
|
||||
Index start = count;
|
||||
StorageIndex start = count;
|
||||
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
|
||||
for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
|
||||
{
|
||||
@ -1089,7 +1088,7 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
|
||||
OtherCopyEval otherCopyEval(otherCopy);
|
||||
|
||||
SparseMatrix dest(other.rows(),other.cols());
|
||||
Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
|
||||
Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
|
||||
|
||||
// pass 1
|
||||
// FIXME the above copy could be merged with that pass
|
||||
@ -1098,8 +1097,8 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
|
||||
++dest.m_outerIndex[it.index()];
|
||||
|
||||
// prefix sum
|
||||
Index count = 0;
|
||||
Matrix<Index,Dynamic,1> positions(dest.outerSize());
|
||||
StorageIndex count = 0;
|
||||
IndexVector positions(dest.outerSize());
|
||||
for (Index j=0; j<dest.outerSize(); ++j)
|
||||
{
|
||||
Index tmp = dest.m_outerIndex[j];
|
||||
@ -1111,7 +1110,7 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
|
||||
// alloc
|
||||
dest.m_data.resize(count);
|
||||
// pass 2
|
||||
for (Index j=0; j<otherCopy.outerSize(); ++j)
|
||||
for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
|
||||
{
|
||||
for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
|
||||
{
|
||||
@ -1139,15 +1138,15 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
|
||||
{
|
||||
eigen_assert(!isCompressed());
|
||||
|
||||
const Index outer = IsRowMajor ? row : col;
|
||||
const Index inner = IsRowMajor ? col : row;
|
||||
const StorageIndex outer = convert_index(IsRowMajor ? row : col);
|
||||
const StorageIndex inner = convert_index(IsRowMajor ? col : row);
|
||||
|
||||
Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
|
||||
Index innerNNZ = m_innerNonZeros[outer];
|
||||
StorageIndex innerNNZ = m_innerNonZeros[outer];
|
||||
if(innerNNZ>=room)
|
||||
{
|
||||
// this inner vector is full, we need to reallocate the whole buffer :(
|
||||
reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
|
||||
reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
|
||||
}
|
||||
|
||||
Index startId = m_outerIndex[outer];
|
||||
@ -1180,7 +1179,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
|
||||
// we start a new inner vector
|
||||
while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
|
||||
{
|
||||
m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
|
||||
m_outerIndex[previousOuter] = convert_index(m_data.size());
|
||||
--previousOuter;
|
||||
}
|
||||
m_outerIndex[outer+1] = m_outerIndex[outer];
|
||||
@ -1280,7 +1279,6 @@ struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
|
||||
: evaluator_base<SparseMatrix<_Scalar,_Options,_Index> >
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
typedef _Index Index;
|
||||
typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
|
||||
typedef typename SparseMatrixType::InnerIterator InnerIterator;
|
||||
typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator;
|
||||
|
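Usage sketch (not part of the commit; function name and sizes are illustrative): after this change, the raw CSC arrays exposed for interoperability are typed with the matrix's StorageIndex (int by default) rather than the wide global Index type:

// Illustrative sketch only; assumes Eigen with this patch applied.
#include <Eigen/Sparse>
#include <iostream>

void csc_dump()
{
  Eigen::SparseMatrix<double> A(3,3);   // column-major; StorageIndex defaults to int
  A.insert(0,0) = 1.0;
  A.insert(2,1) = 2.0;
  A.makeCompressed();                   // innerNonZeroPtr() returns 0 from here on

  const int*    outer = A.outerIndexPtr();  // StorageIndex*, i.e. int*, not Index*
  const int*    inner = A.innerIndexPtr();
  const double* vals  = A.valuePtr();
  for(int j = 0; j < A.outerSize(); ++j)
    for(int p = outer[j]; p < outer[j+1]; ++p)
      std::cout << "(" << inner[p] << "," << j << ") = " << vals[p] << "\n";
}
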
@ -30,13 +30,15 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
typedef typename internal::add_const_on_value_type_if_arithmetic<
typename internal::packet_traits<Scalar>::type
>::type PacketReturnType;

typedef SparseMatrixBase StorageBaseType;
typedef EigenBase<Derived> Base;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;

template<typename OtherDerived>
Derived& operator=(const EigenBase<OtherDerived> &other);
@ -99,7 +101,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;

// FIXME storage order do not match evaluator storage order
typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, Index> PlainObject;
typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, StorageIndex> PlainObject;

#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is the "real scalar" type; if the \a Scalar type is already real numbers
@ -142,15 +144,15 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS

/** \returns the number of rows. \sa cols() */
inline Index rows() const { return derived().rows(); }
inline StorageIndex rows() const { return derived().rows(); }
/** \returns the number of columns. \sa rows() */
inline Index cols() const { return derived().cols(); }
inline StorageIndex cols() const { return derived().cols(); }
/** \returns the number of coefficients, which is \a rows()*cols().
* \sa rows(), cols(). */
inline Index size() const { return rows() * cols(); }
inline StorageIndex size() const { return rows() * cols(); }
/** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */
inline Index nonZeros() const { return derived().nonZeros(); }
inline StorageIndex nonZeros() const { return derived().nonZeros(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
@ -158,10 +160,10 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
inline bool isVector() const { return rows()==1 || cols()==1; }
/** \returns the size of the storage major dimension,
* i.e., the number of columns for a columns major matrix, and the number of rows otherwise */
Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
StorageIndex outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
/** \returns the size of the inner dimension according to the storage order,
* i.e., the number of rows for a columns major matrix, and the number of cols otherwise */
Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
StorageIndex innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }

bool isRValue() const { return m_isRValue; }
Derived& markAsRValue() { m_isRValue = true; return derived(); }
@ -227,8 +229,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
}
else
{
SparseMatrix<Scalar, RowMajorBit, Index> trans = m;
s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, Index> >&>(trans);
SparseMatrix<Scalar, RowMajorBit, StorageIndex> trans = m;
s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, StorageIndex> >&>(trans);
}
}
return s;
@ -288,7 +290,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
{ return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }

/** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
{
return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
}
@ -352,6 +354,10 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
protected:

bool m_isRValue;

static inline StorageIndex convert_index(const Index idx) {
return internal::convert_index<StorageIndex>(idx);
}
};

} // end namespace Eigen

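The new protected convert_index helper is the piece that silences the Index conversion warnings: each time a value of the wide Index type is stored into a StorageIndex slot, the narrowing goes through one checked cast. A rough standalone sketch of the idea (the real internal::convert_index may differ in details):

// Illustrative sketch only, not the actual Eigen implementation.
#include <cassert>
#include <limits>

template<typename IndexDest, typename IndexSrc>
inline IndexDest convert_index(const IndexSrc& idx)
{
  // verify in debug builds that the value actually fits into IndexDest
  assert(idx <= std::numeric_limits<IndexDest>::max() &&
         "index value too big for the storage index type");
  return IndexDest(idx);
}
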
@ -21,15 +21,15 @@ struct traits<permut_sparsematrix_product_retval<PermutationType, MatrixType, Si
{
typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
typedef typename MatrixTypeNestedCleaned::Index Index;
typedef typename MatrixTypeNestedCleaned::StorageIndex StorageIndex;
enum {
SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
};

typedef typename internal::conditional<MoveOuter,
SparseMatrix<Scalar,SrcStorageOrder,Index>,
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> >::type ReturnType;
SparseMatrix<Scalar,SrcStorageOrder,StorageIndex>,
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> >::type ReturnType;
};

template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
@ -38,7 +38,7 @@ struct permut_sparsematrix_product_retval
{
typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
typedef typename MatrixTypeNestedCleaned::Index Index;
typedef typename MatrixTypeNestedCleaned::StorageIndex StorageIndex;

enum {
SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
@ -56,8 +56,8 @@ struct permut_sparsematrix_product_retval
{
if(MoveOuter)
{
SparseMatrix<Scalar,SrcStorageOrder,Index> tmp(m_matrix.rows(), m_matrix.cols());
Matrix<Index,Dynamic,1> sizes(m_matrix.outerSize());
SparseMatrix<Scalar,SrcStorageOrder,StorageIndex> tmp(m_matrix.rows(), m_matrix.cols());
Matrix<StorageIndex,Dynamic,1> sizes(m_matrix.outerSize());
for(Index j=0; j<m_matrix.outerSize(); ++j)
{
Index jp = m_permutation.indices().coeff(j);
@ -76,10 +76,10 @@ struct permut_sparsematrix_product_retval
}
else
{
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> tmp(m_matrix.rows(), m_matrix.cols());
Matrix<Index,Dynamic,1> sizes(tmp.outerSize());
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> tmp(m_matrix.rows(), m_matrix.cols());
Matrix<StorageIndex,Dynamic,1> sizes(tmp.outerSize());
sizes.setZero();
PermutationMatrix<Dynamic,Dynamic,Index> perm;
PermutationMatrix<Dynamic,Dynamic,StorageIndex> perm;
if((Side==OnTheLeft) ^ Transposed)
perm = m_permutation;
else

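Usage sketch (illustrative only; whether a given product expression routes through this exact retval class depends on the evaluator dispatch at this revision): the permutation's index type should now match the sparse matrix's StorageIndex, which is also the PermutationMatrix default:

// Illustrative sketch only.
#include <Eigen/Sparse>

void permute_rows_example()
{
  typedef Eigen::SparseMatrix<double> SpMat;   // StorageIndex == int
  SpMat A(4,4);
  A.insert(1,2) = 3.0;

  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, SpMat::StorageIndex> P(4);
  P.setIdentity();
  SpMat B = P * A;   // rows of A permuted by P
}
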
@ -33,10 +33,10 @@ struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

}

@ -48,8 +48,8 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
enum { Mode = _Mode };

typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef Matrix<Index,Dynamic,1> VectorI;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

@ -58,8 +58,8 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
}

inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline StorageIndex rows() const { return m_matrix.rows(); }
inline StorageIndex cols() const { return m_matrix.cols(); }

/** \internal \returns a reference to the nested matrix */
const _MatrixTypeNested& matrix() const { return m_matrix; }
@ -117,22 +117,22 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

/** \internal triggered by sparse_matrix = SparseSelfadjointView; */
template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,StorageIndex>& _dest) const
{
internal::permute_symm_to_fullsymm<Mode>(m_matrix, _dest);
}

template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& _dest) const
{
// TODO directly evaluate into _dest;
SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(_dest.rows(),_dest.cols());
internal::permute_symm_to_fullsymm<Mode>(m_matrix, tmp);
_dest = tmp;
}

/** \returns an expression of P H P^-1 */
// TODO implement twists in a more evaluator friendly fashion
SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
{
return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
}
@ -215,7 +215,6 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons

typedef typename evaluator<SparseLhsType>::type LhsEval;
typedef typename evaluator<SparseLhsType>::InnerIterator LhsIterator;
typedef typename SparseLhsType::Index Index;
typedef typename SparseLhsType::Scalar LhsScalar;

enum {
@ -302,7 +301,7 @@ struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, Pr
}
};

// NOTE: these two overloads are needed to evaluate the sparse sefladjoint view into a full sparse matrix
// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore

template<typename LhsView, typename Rhs, int ProductTag>
@ -353,12 +352,12 @@protected:
namespace internal {

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::Scalar Scalar;
typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
typedef Matrix<Index,Dynamic,1> VectorI;
typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;

Dest& dest(_dest.derived());
enum {
@ -401,16 +400,16 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
count[j] = dest.outerIndexPtr()[j];

// copy data
for(Index j = 0; j<size; ++j)
for(StorageIndex j = 0; j<size; ++j)
{
for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
{
Index i = it.index();
StorageIndex i = internal::convert_index<StorageIndex>(it.index());
Index r = it.row();
Index c = it.col();

Index jp = perm ? perm[j] : j;
Index ip = perm ? perm[i] : i;
StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm ? perm[i] : i;

if(Mode==(Upper|Lower))
{
@ -440,12 +439,12 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
}

template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::Scalar Scalar;
SparseMatrix<Scalar,DstOrder,Index>& dest(_dest.derived());
typedef Matrix<Index,Dynamic,1> VectorI;
SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
enum {
SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
StorageOrderMatch = int(SrcOrder) == int(DstOrder),
@ -453,20 +452,20 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
};

Index size = mat.rows();
StorageIndex size = mat.rows();
VectorI count(size);
count.setZero();
dest.resize(size,size);
for(Index j = 0; j<size; ++j)
for(StorageIndex j = 0; j<size; ++j)
{
Index jp = perm ? perm[j] : j;
StorageIndex jp = perm ? perm[j] : j;
for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
{
Index i = it.index();
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;

Index ip = perm ? perm[i] : i;
StorageIndex ip = perm ? perm[i] : i;
count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
}
@ -477,17 +476,17 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
for(Index j=0; j<size; ++j)
count[j] = dest.outerIndexPtr()[j];

for(Index j = 0; j<size; ++j)
for(StorageIndex j = 0; j<size; ++j)
{

for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
{
Index i = it.index();
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;

Index jp = perm ? perm[j] : j;
Index ip = perm? perm[i] : i;
StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm? perm[i] : i;

Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
@ -519,11 +518,11 @@ class SparseSymmetricPermutationProduct
{
public:
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
protected:
typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
public:
typedef Matrix<Index,Dynamic,1> VectorI;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

@ -531,8 +530,8 @@ class SparseSymmetricPermutationProduct
: m_matrix(mat), m_perm(perm)
{}

inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline StorageIndex rows() const { return m_matrix.rows(); }
inline StorageIndex cols() const { return m_matrix.cols(); }

template<typename DestScalar, int Options, typename DstIndex>
void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const

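For context, a sketch of how the permute_symm_to_fullsymm path above is typically reached from user code (identifiers and sizes below are illustrative):

// Illustrative sketch only: B = P A P^-1 with A stored as its lower triangle.
#include <Eigen/Sparse>

void twisted_example(const Eigen::SparseMatrix<double>& A)
{
  typedef Eigen::SparseMatrix<double> SpMat;
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, SpMat::StorageIndex> P(A.rows());
  P.setIdentity();
  SpMat B(A.rows(), A.cols());
  // twistedBy returns a SparseSymmetricPermutationProduct, evaluated on assignment
  B = A.selfadjointView<Eigen::Lower>().twistedBy(P);
}
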
@ -22,16 +22,16 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);

typedef typename remove_all<Lhs>::type::Scalar Scalar;
typedef typename remove_all<Lhs>::type::Index Index;
typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;

// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
StorageIndex rows = lhs.innerSize();
StorageIndex cols = rhs.outerSize();
//Index size = lhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());

// allocate a temporary buffer
AmbiVector<Scalar,Index> tempVector(rows);
AmbiVector<Scalar,StorageIndex> tempVector(rows);

// estimate the number of non zero entries
// given a rhs column containing Y non zeros, we assume that the respective Y columns
@ -39,7 +39,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// the product of a rhs column with the lhs is X+Y where X is the average number of non zero
// per column of the lhs.
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
StorageIndex estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();

// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)
@ -70,7 +70,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
}
}
res.startVec(j);
for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector,tolerance); it; ++it)
for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
res.insertBackByOuterInner(j,it.index()) = it.value();
}
res.finalize();
@ -103,7 +103,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// we need a col-major matrix to hold the result
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> SparseTemporaryType;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> SparseTemporaryType;
SparseTemporaryType _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
res = _res;
@ -129,8 +129,8 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixLhs colLhs(lhs);
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
@ -149,7 +149,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::Index> RowMajorMatrixLhs;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;
RowMajorMatrixLhs rowLhs(lhs);
sparse_sparse_product_with_pruning_selector<RowMajorMatrixLhs,Rhs,ResultType,RowMajor,RowMajor>(rowLhs,rhs,res,tolerance);
}
@ -161,7 +161,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,C
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::Index> RowMajorMatrixRhs;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;
RowMajorMatrixRhs rowRhs(rhs);
sparse_sparse_product_with_pruning_selector<Lhs,RowMajorMatrixRhs,ResultType,RowMajor,RowMajor,RowMajor>(lhs,rowRhs,res,tolerance);
}
@ -173,7 +173,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<Lhs,ColMajorMatrixRhs,ResultType>(lhs, colRhs, res, tolerance);
}
@ -185,7 +185,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,C
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
ColMajorMatrixLhs colLhs(lhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,Rhs,ResultType>(colLhs, rhs, res, tolerance);
}

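The selector machinery above ultimately serves the user-facing pruned() product, which drops near-zero terms while the product is formed. A minimal sketch (tolerance values chosen arbitrarily):

// Illustrative sketch only.
#include <Eigen/Sparse>

void pruned_product(const Eigen::SparseMatrix<double>& A,
                    const Eigen::SparseMatrix<double>& B)
{
  Eigen::SparseMatrix<double> C = (A * B).pruned();            // default tolerance
  Eigen::SparseMatrix<double> D = (A * B).pruned(1.0, 1e-12);  // drop |c| <= 1.0*1e-12
}
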
@ -20,7 +20,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
protected:
typedef SparseMatrixBase<Transpose<MatrixType> > Base;
public:
inline typename MatrixType::Index nonZeros() const { return Base::derived().nestedExpression().nonZeros(); }
inline typename MatrixType::StorageIndex nonZeros() const { return Base::derived().nestedExpression().nonZeros(); }
};

namespace internal {
@ -33,28 +33,28 @@ struct unary_evaluator<Transpose<ArgType>, IteratorBased>
typedef typename evaluator<ArgType>::ReverseInnerIterator EvalReverseIterator;
public:
typedef Transpose<ArgType> XprType;
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;

class InnerIterator : public EvalIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer)
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: EvalIterator(unaryOp.m_argImpl,outer)
{}

Index row() const { return EvalIterator::col(); }
Index col() const { return EvalIterator::row(); }
StorageIndex row() const { return EvalIterator::col(); }
StorageIndex col() const { return EvalIterator::row(); }
};

class ReverseInnerIterator : public EvalReverseIterator
{
public:
EIGEN_STRONG_INLINE ReverseInnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer)
EIGEN_STRONG_INLINE ReverseInnerIterator(const unary_evaluator& unaryOp, Index outer)
: EvalReverseIterator(unaryOp.m_argImpl,outer)
{}

Index row() const { return EvalReverseIterator::col(); }
Index col() const { return EvalReverseIterator::row(); }
StorageIndex row() const { return EvalReverseIterator::col(); }
StorageIndex col() const { return EvalReverseIterator::row(); }
};

enum {

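The two iterator wrappers above only swap row() and col() on the wrapped iterator, so materializing a transpose stays a cheap structural copy. A small sketch:

// Illustrative sketch only: copying through the swapped iterators above.
#include <Eigen/Sparse>

void transpose_example(const Eigen::SparseMatrix<double>& A)
{
  Eigen::SparseMatrix<double> At = A.transpose();  // row()/col() swapped during the copy
}
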
@ -64,7 +64,7 @@ template<typename MatrixType, unsigned int Mode>
class TriangularViewImpl<MatrixType,Mode,Sparse>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
{
typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
typedef typename TriangularViewType::Index Index;
typedef typename TriangularViewType::StorageIndex StorageIndex;
public:

EIGEN_STRONG_INLINE InnerIterator(const TriangularViewImpl& view, Index outer)
@ -102,9 +102,9 @@ class TriangularViewImpl<MatrixType,Mode,Sparse>::InnerIterator : public MatrixT
return *this;
}

inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); }
inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); }
inline Index index() const
inline StorageIndex row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); }
inline StorageIndex col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); }
inline StorageIndex index() const
{
if(HasUnitDiag && m_returnOne) return Base::outer();
else return Base::index();
@ -134,7 +134,7 @@ template<typename MatrixType, unsigned int Mode>
class TriangularViewImpl<MatrixType,Mode,Sparse>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
{
typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
typedef typename TriangularViewImpl::Index Index;
typedef typename TriangularViewImpl::StorageIndex StorageIndex;
public:

EIGEN_STRONG_INLINE ReverseInnerIterator(const TriangularViewType& view, Index outer)
@ -150,8 +150,8 @@ class TriangularViewImpl<MatrixType,Mode,Sparse>::ReverseInnerIterator : public
EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
{ Base::operator--(); return *this; }

inline Index row() const { return Base::row(); }
inline Index col() const { return Base::col(); }
inline StorageIndex row() const { return Base::row(); }
inline StorageIndex col() const { return Base::col(); }

EIGEN_STRONG_INLINE operator bool() const
{
@ -175,7 +175,7 @@ struct unary_evaluator<TriangularView<ArgType,Mode>, IteratorBased>
protected:

typedef typename XprType::Scalar Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit))
@ -246,9 +246,9 @@public:
}
}

// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); }
// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); }
inline Index index() const
// inline StorageIndex row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); }
// inline StorageIndex col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); }
inline StorageIndex index() const
{
if(HasUnitDiag && m_returnOne) return Base::outer();
else return Base::index();

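A typical consumer of these triangular iterators is the sparse triangular solve; a minimal sketch, assuming L is square with its lower triangle stored:

// Illustrative sketch only: forward substitution over the stored lower triangle.
#include <Eigen/Sparse>

Eigen::VectorXd lower_solve(const Eigen::SparseMatrix<double>& L,
                            const Eigen::VectorXd& b)
{
  return L.triangularView<Eigen::Lower>().solve(b);
}
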
@ -43,20 +43,22 @@ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)

// TODO this is mostly the same as EIGEN_GENERIC_PUBLIC_INTERFACE
#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
typedef BaseClass Base; \
typedef typename Eigen::internal::traits<Derived >::Scalar Scalar; \
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
typedef typename Eigen::internal::nested<Derived >::type Nested; \
typedef typename Eigen::internal::traits<Derived >::StorageKind StorageKind; \
typedef typename Eigen::internal::traits<Derived >::Index Index; \
typedef typename Eigen::internal::traits<Derived >::StorageIndex StorageIndex; \
enum { RowsAtCompileTime = Eigen::internal::traits<Derived >::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::internal::traits<Derived >::ColsAtCompileTime, \
Flags = Eigen::internal::traits<Derived>::Flags, \
SizeAtCompileTime = Base::SizeAtCompileTime, \
IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
using Base::derived; \
using Base::const_cast_derived;
using Base::const_cast_derived; \
using Base::convert_index;

#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
_EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived >)
@ -67,10 +69,10 @@ const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;

template<typename Derived> class SparseMatrixBase;
template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class DynamicSparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseVector;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class MappedSparseMatrix;

template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
@ -99,24 +101,25 @@ template<typename T> struct eval<T,Sparse>

template<typename T,int Cols> struct sparse_eval<T,1,Cols> {
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::Index _Index;
typedef typename traits<T>::StorageIndex _StorageIndex;
public:
typedef SparseVector<_Scalar, RowMajor, _Index> type;
typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type;
};

template<typename T,int Rows> struct sparse_eval<T,Rows,1> {
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::Index _Index;
typedef typename traits<T>::StorageIndex _StorageIndex;
public:
typedef SparseVector<_Scalar, ColMajor, _Index> type;
typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type;
};

// TODO this seems almost identical to plain_matrix_type<T, Sparse>
template<typename T,int Rows,int Cols> struct sparse_eval {
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::Index _Index;
typedef typename traits<T>::StorageIndex _StorageIndex;
enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
typedef SparseMatrix<_Scalar, _Options, _Index> type;
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
};

template<typename T> struct sparse_eval<T,1,1> {
@ -128,10 +131,10 @@ template<typename T> struct sparse_eval<T,1,1> {
template<typename T> struct plain_matrix_type<T,Sparse>
{
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::Index _Index;
typedef typename traits<T>::StorageIndex _StorageIndex;
enum { _Options = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
typedef SparseMatrix<_Scalar, _Options, _Index> type;
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
};

template<typename Decomposition, typename RhsType>
@ -162,26 +165,26 @@ template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTria
*
* \sa SparseMatrix::setFromTriplets()
*/
template<typename Scalar, typename Index=typename SparseMatrix<Scalar>::Index >
template<typename Scalar, typename StorageIndex=typename SparseMatrix<Scalar>::StorageIndex >
class Triplet
{
public:
Triplet() : m_row(0), m_col(0), m_value(0) {}

Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0))
Triplet(const StorageIndex& i, const StorageIndex& j, const Scalar& v = Scalar(0))
: m_row(i), m_col(j), m_value(v)
{}

/** \returns the row index of the element */
const Index& row() const { return m_row; }
const StorageIndex& row() const { return m_row; }

/** \returns the column index of the element */
const Index& col() const { return m_col; }
const StorageIndex& col() const { return m_col; }

/** \returns the value of the element */
const Scalar& value() const { return m_value; }
protected:
Index m_row, m_col;
StorageIndex m_row, m_col;
Scalar m_value;
};

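Since the Triplet class above is the entry point of the setFromTriplets() workflow, here is a short usage sketch (values arbitrary); note that its index members are now StorageIndex, int by default:

// Illustrative sketch only: assemble a matrix from (row, col, value) triplets.
#include <Eigen/Sparse>
#include <vector>

void triplet_example()
{
  typedef Eigen::Triplet<double> T;   // StorageIndex defaults to int
  std::vector<T> entries;
  entries.push_back(T(0, 0,  1.0));
  entries.push_back(T(1, 2, -2.5));
  entries.push_back(T(1, 2,  0.5));   // duplicate entries are summed up

  Eigen::SparseMatrix<double> A(3, 3);
  A.setFromTriplets(entries.begin(), entries.end());
}
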
@ -26,11 +26,11 @@ namespace Eigen {
*/

namespace internal {
template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseVector<_Scalar, _Options, _Index> >
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseVector<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
typedef _Index Index;
typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
@ -61,9 +61,9 @@ struct sparse_vector_assign_selector;

}

template<typename _Scalar, int _Options, typename _Index>
template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseVector
: public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
: public SparseMatrixBase<SparseVector<_Scalar, _Options, _StorageIndex> >
{
typedef SparseMatrixBase<SparseVector> SparseBase;

@ -72,23 +72,23 @@ class SparseVector
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)

typedef internal::CompressedStorage<Scalar,Index> Storage;
typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum { IsColVector = internal::traits<SparseVector>::IsColVector };

enum {
Options = _Options
};

EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
EIGEN_STRONG_INLINE StorageIndex rows() const { return IsColVector ? m_size : 1; }
EIGEN_STRONG_INLINE StorageIndex cols() const { return IsColVector ? 1 : m_size; }
EIGEN_STRONG_INLINE StorageIndex innerSize() const { return m_size; }
EIGEN_STRONG_INLINE StorageIndex outerSize() const { return 1; }

EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); }
EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); }

EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); }
EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); }
EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return &m_data.index(0); }
EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return &m_data.index(0); }

/** \internal */
inline Storage& data() { return m_data; }
@ -132,7 +132,7 @@ class SparseVector
inline void setZero() { m_data.clear(); }

/** \returns the number of non zero coefficients */
inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
inline StorageIndex nonZeros() const { return static_cast<StorageIndex>(m_data.size()); }

inline void startVec(Index outer)
{
@ -188,7 +188,7 @@ class SparseVector
m_data.value(p+1) = m_data.value(p);
--p;
}
m_data.index(p+1) = i;
m_data.index(p+1) = convert_index(i);
m_data.value(p+1) = 0;
return m_data.value(p+1);
}
@ -207,13 +207,13 @@ class SparseVector

void resize(Index rows, Index cols)
{
eigen_assert(rows==1 || cols==1);
eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1");
resize(IsColVector ? rows : cols);
}

void resize(Index newSize)
{
m_size = newSize;
m_size = convert_index(newSize);
m_data.clear();
}

@ -348,27 +348,27 @@protected:

static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}

Storage m_data;
Index m_size;
StorageIndex m_size;
};

template<typename Scalar, int _Options, typename _Index>
|
||||
class SparseVector<Scalar,_Options,_Index>::InnerIterator
|
||||
template<typename Scalar, int _Options, typename _StorageIndex>
|
||||
class SparseVector<Scalar,_Options,_StorageIndex>::InnerIterator
|
||||
{
|
||||
public:
|
||||
explicit InnerIterator(const SparseVector& vec, Index outer=0)
|
||||
: m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
|
||||
: m_data(vec.m_data), m_id(0), m_end(convert_index(m_data.size()))
|
||||
{
|
||||
EIGEN_UNUSED_VARIABLE(outer);
|
||||
eigen_assert(outer==0);
|
||||
}
|
||||
|
||||
explicit InnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
|
||||
: m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
|
||||
explicit InnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
|
||||
: m_data(data), m_id(0), m_end(convert_index(m_data.size()))
|
||||
{}
|
||||
|
||||
inline InnerIterator& operator++() { m_id++; return *this; }
|
||||
@ -376,16 +376,16 @@ class SparseVector<Scalar,_Options,_Index>::InnerIterator
|
||||
inline Scalar value() const { return m_data.value(m_id); }
|
||||
inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
|
||||
|
||||
inline Index index() const { return m_data.index(m_id); }
|
||||
inline Index row() const { return IsColVector ? index() : 0; }
|
||||
inline Index col() const { return IsColVector ? 0 : index(); }
|
||||
inline StorageIndex index() const { return m_data.index(m_id); }
|
||||
inline StorageIndex row() const { return IsColVector ? index() : 0; }
|
||||
inline StorageIndex col() const { return IsColVector ? 0 : index(); }
|
||||
|
||||
inline operator bool() const { return (m_id < m_end); }
|
||||
|
||||
protected:
|
||||
const internal::CompressedStorage<Scalar,Index>& m_data;
|
||||
Index m_id;
|
||||
const Index m_end;
|
||||
const internal::CompressedStorage<Scalar,StorageIndex>& m_data;
|
||||
StorageIndex m_id;
|
||||
const StorageIndex m_end;
|
||||
private:
|
||||
// If you get here, then you're not using the right InnerIterator type, e.g.:
|
||||
// SparseMatrix<double,RowMajor> A;
|
||||
@ -393,19 +393,19 @@ class SparseVector<Scalar,_Options,_Index>::InnerIterator
|
||||
template<typename T> InnerIterator(const SparseMatrixBase<T>&,Index outer=0);
|
||||
};
|
||||
|
||||
template<typename Scalar, int _Options, typename _Index>
|
||||
class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
|
||||
template<typename Scalar, int _Options, typename _StorageIndex>
|
||||
class SparseVector<Scalar,_Options,_StorageIndex>::ReverseInnerIterator
|
||||
{
|
||||
public:
|
||||
explicit ReverseInnerIterator(const SparseVector& vec, Index outer=0)
|
||||
: m_data(vec.m_data), m_id(static_cast<Index>(m_data.size())), m_start(0)
|
||||
: m_data(vec.m_data), m_id(convert_index(m_data.size())), m_start(0)
|
||||
{
|
||||
EIGEN_UNUSED_VARIABLE(outer);
|
||||
eigen_assert(outer==0);
|
||||
}
|
||||
|
||||
explicit ReverseInnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
|
||||
: m_data(data), m_id(static_cast<Index>(m_data.size())), m_start(0)
|
||||
explicit ReverseInnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
|
||||
: m_data(data), m_id(convert_index(m_data.size())), m_start(0)
|
||||
{}
|
||||
|
||||
inline ReverseInnerIterator& operator--() { m_id--; return *this; }
|
||||
@ -413,15 +413,15 @@ class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
|
||||
inline Scalar value() const { return m_data.value(m_id-1); }
|
||||
inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id-1)); }
|
||||
|
||||
inline Index index() const { return m_data.index(m_id-1); }
|
||||
inline Index row() const { return IsColVector ? index() : 0; }
|
||||
inline Index col() const { return IsColVector ? 0 : index(); }
|
||||
inline StorageIndex index() const { return m_data.index(m_id-1); }
|
||||
inline StorageIndex row() const { return IsColVector ? index() : 0; }
|
||||
inline StorageIndex col() const { return IsColVector ? 0 : index(); }
|
||||
|
||||
inline operator bool() const { return (m_id > m_start); }
|
||||
|
||||
protected:
|
||||
const internal::CompressedStorage<Scalar,Index>& m_data;
|
||||
Index m_id;
|
||||
const internal::CompressedStorage<Scalar,StorageIndex>& m_data;
|
||||
StorageIndex m_id;
|
||||
const Index m_start;
|
||||
};
|
||||
|
||||
@ -465,7 +465,7 @@ struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
|
||||
eigen_internal_assert(src.outerSize()==src.size());
|
||||
typedef typename internal::evaluator<Src>::type SrcEvaluatorType;
|
||||
SrcEvaluatorType srcEval(src);
|
||||
for(typename Dest::Index i=0; i<src.size(); ++i)
|
||||
for(Index i=0; i<src.size(); ++i)
|
||||
{
|
||||
typename SrcEvaluatorType::InnerIterator it(srcEval, i);
|
||||
if(it)
|
||||
|
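As a reading aid (not part of the diff), here is a minimal sketch of how the two index types are meant to be used after this change; the SparseVector API is taken from the hunks above, and the concrete values are illustrative only:

// StorageIndex is the type stored in the index arrays (the template
// parameter, int by default); the global Index is used for sizes and loops.
#include <Eigen/SparseCore>

int main()
{
  Eigen::SparseVector<double> v(10);              // StorageIndex == int
  v.insert(3) = 1.5;
  const Eigen::SparseVector<double>::StorageIndex* idx = v.innerIndexPtr();
  Eigen::Index n = v.nonZeros();                  // sizes travel as Index
  return (idx[0] == 3 && n == 1) ? 0 : 1;
}
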
@ -18,7 +18,7 @@ namespace internal {
template<typename MatrixType>
struct traits<SparseView<MatrixType> > : traits<MatrixType>
{
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Sparse StorageKind;
enum {
Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
@ -40,11 +40,11 @@ public:
RealScalar m_epsilon = NumTraits<Scalar>::dummy_precision()) :
m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {}

inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline StorageIndex rows() const { return m_matrix.rows(); }
inline StorageIndex cols() const { return m_matrix.cols(); }

inline Index innerSize() const { return m_matrix.innerSize(); }
inline Index outerSize() const { return m_matrix.outerSize(); }
inline StorageIndex innerSize() const { return m_matrix.innerSize(); }
inline StorageIndex outerSize() const { return m_matrix.outerSize(); }

/** \returns the nested expression */
const typename internal::remove_all<MatrixTypeNested>::type&
@ -126,7 +126,7 @@ struct unary_evaluator<SparseView<ArgType>, IndexBased>
typedef SparseView<ArgType> XprType;
protected:
enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename XprType::Scalar Scalar;
public:

@ -134,7 +134,7 @@ struct unary_evaluator<SparseView<ArgType>, IndexBased>
{
public:

EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, typename XprType::Index outer)
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
: m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize())
{
incrementToNonZero();
@ -153,17 +153,17 @@ struct unary_evaluator<SparseView<ArgType>, IndexBased>
: m_sve.m_argImpl.coeff(m_inner, m_outer);
}

EIGEN_STRONG_INLINE Index index() const { return m_inner; }
inline Index row() const { return IsRowMajor ? m_outer : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; }
inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); }
inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; }

EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }

protected:
const unary_evaluator &m_sve;
Index m_inner;
const Index m_outer;
const Index m_end;
StorageIndex m_inner;
const StorageIndex m_outer;
const StorageIndex m_end;

private:
void incrementToNonZero()

@ -28,7 +28,7 @@ template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef typename Lhs::Index Index;
typedef typename Lhs::StorageIndex StorageIndex;
typedef typename evaluator<Lhs>::type LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
@ -66,7 +66,7 @@ template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef typename Lhs::Index Index;
typedef typename Lhs::StorageIndex StorageIndex;
typedef typename evaluator<Lhs>::type LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
@ -106,7 +106,7 @@ template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef typename Lhs::Index Index;
typedef typename Lhs::StorageIndex StorageIndex;
typedef typename evaluator<Lhs>::type LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
@ -142,7 +142,7 @@ template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef typename Lhs::Index Index;
typedef typename Lhs::StorageIndex StorageIndex;
typedef typename evaluator<Lhs>::type LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
@ -212,12 +212,12 @@ template<typename Lhs, typename Rhs, int Mode, int UpLo>
struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef typename promote_index_type<typename traits<Lhs>::Index,
typename traits<Rhs>::Index>::type Index;
typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,
typename traits<Rhs>::StorageIndex>::type StorageIndex;
static void run(const Lhs& lhs, Rhs& other)
{
const bool IsLower = (UpLo==Lower);
AmbiVector<Scalar,Index> tempVector(other.rows()*2);
AmbiVector<Scalar,StorageIndex> tempVector(other.rows()*2);
tempVector.setBounds(0,other.rows());

Rhs res(other.rows(), other.cols());
@ -273,7 +273,7 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>

Index count = 0;
// FIXME compute a reference value to filter zeros
for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector/*,1e-12*/); it; ++it)
{
++ count;
// std::cerr << "fill " << it.index() << ", " << col << "\n";

@ -14,7 +14,7 @@

namespace Eigen {

template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::Index> > class SparseLU;
template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::StorageIndex> > class SparseLU;
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;

@ -70,7 +70,7 @@ template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixURetu
* \sa \ref OrderingMethods_Module
*/
template <typename _MatrixType, typename _OrderingType>
class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >, public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >, public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::StorageIndex>
{
protected:
typedef SparseSolverBase<SparseLU<_MatrixType,_OrderingType> > APIBase;
@ -82,13 +82,13 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
typedef _OrderingType OrderingType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef SparseMatrix<Scalar,ColMajor,Index> NCMatrix;
typedef internal::MappedSuperNodalMatrix<Scalar, Index> SCMatrix;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> NCMatrix;
typedef internal::MappedSuperNodalMatrix<Scalar, StorageIndex> SCMatrix;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
typedef Matrix<Index,Dynamic,1> IndexVector;
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
typedef internal::SparseLUImpl<Scalar, Index> Base;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
typedef internal::SparseLUImpl<Scalar, StorageIndex> Base;

public:
SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
@ -122,8 +122,8 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
factorize(matrix);
}

inline Index rows() const { return m_mat.rows(); }
inline Index cols() const { return m_mat.cols(); }
inline StorageIndex rows() const { return m_mat.rows(); }
inline StorageIndex cols() const { return m_mat.cols(); }
/** Indicate that the pattern of the input matrix is symmetric */
void isSymmetric(bool sym)
{
@ -146,9 +146,9 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
* y = b; matrixU().solveInPlace(y);
* \endcode
*/
SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,Index> > matrixU() const
SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,StorageIndex> > matrixU() const
{
return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,Index> >(m_Lstore, m_Ustore);
return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,StorageIndex> >(m_Lstore, m_Ustore);
}

/**
@ -324,7 +324,7 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
std::string m_lastError;
NCMatrix m_mat; // The input (permuted ) matrix
SCMatrix m_Lstore; // The lower triangular matrix (supernodal)
MappedSparseMatrix<Scalar,ColMajor,Index> m_Ustore; // The upper triangular matrix
MappedSparseMatrix<Scalar,ColMajor,StorageIndex> m_Ustore; // The upper triangular matrix
PermutationType m_perm_c; // Column permutation
PermutationType m_perm_r ; // Row permutation
IndexVector m_etree; // Column elimination tree
@ -334,10 +334,10 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
// SparseLU options
bool m_symmetricmode;
// values for performance
internal::perfvalues<Index> m_perfv;
internal::perfvalues<StorageIndex> m_perfv;
RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
Index m_detPermR; // Determinant of the coefficient matrix
StorageIndex m_nnzL, m_nnzU; // Nonzeros in L and U factors
StorageIndex m_detPermR; // Determinant of the coefficient matrix
private:
// Disable copy constructor
SparseLU (const SparseLU& );
@ -375,7 +375,7 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
{
m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
// Then, permute only the column pointers
ei_declare_aligned_stack_constructed_variable(Index,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast<Index*>(mat.outerIndexPtr()):0);
ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast<StorageIndex*>(mat.outerIndexPtr()):0);

// If the input matrix 'mat' is uncompressed, then the outer-indices do not match the ones of m_mat, and a copy is thus needed.
if(!mat.isCompressed())
@ -640,7 +640,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
// Create supernode matrix L
m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
// Create the column major upper sparse matrix U;
new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, Index> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );
new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, StorageIndex> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );

m_info = Success;
m_factorizationIsOk = true;
@ -649,12 +649,12 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
template<typename MappedSupernodalType>
struct SparseLUMatrixLReturnType : internal::no_assignment_operator
{
typedef typename MappedSupernodalType::Index Index;
typedef typename MappedSupernodalType::StorageIndex StorageIndex;
typedef typename MappedSupernodalType::Scalar Scalar;
explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
{ }
Index rows() { return m_mapL.rows(); }
Index cols() { return m_mapL.cols(); }
StorageIndex rows() { return m_mapL.rows(); }
StorageIndex cols() { return m_mapL.cols(); }
template<typename Dest>
void solveInPlace( MatrixBase<Dest> &X) const
{
@ -666,21 +666,18 @@ struct SparseLUMatrixLReturnType : internal::no_assignment_operator
template<typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType : internal::no_assignment_operator
{
typedef typename MatrixLType::Index Index;
typedef typename MatrixLType::StorageIndex StorageIndex;
typedef typename MatrixLType::Scalar Scalar;
explicit SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
: m_mapL(mapL),m_mapU(mapU)
{ }
Index rows() { return m_mapL.rows(); }
Index cols() { return m_mapL.cols(); }
StorageIndex rows() { return m_mapL.rows(); }
StorageIndex cols() { return m_mapL.cols(); }

template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
{
/* Explicit type conversion as the Index type of MatrixBase<Dest> may be wider than Index */
eigen_assert(X.rows() <= NumTraits<Index>::highest());
eigen_assert(X.cols() <= NumTraits<Index>::highest());
Index nrhs = Index(X.cols());
Index n = Index(X.rows());
Index nrhs = X.cols();
Index n = X.rows();
// Backward solve with U
for (Index k = m_mapL.nsuper(); k >= 0; k--)
{

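As a hedged illustration of why the solver now derives its index type from the matrix rather than keeping a per-class Index typedef (the matrix type below is illustrative, not from the diff):

// A SparseLU instantiated on a matrix with long storage indices now uses
// long consistently for its internal index vectors and default ordering.
#include <Eigen/SparseLU>

typedef Eigen::SparseMatrix<double, Eigen::ColMajor, long> LongSpMat;
Eigen::SparseLU<LongSpMat> lu;  // StorageIndex == long, COLAMDOrdering<long>
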
@ -29,20 +29,20 @@ namespace internal {
* SuperInnerIterator to iterate through all supernodes
* Function for triangular solve
*/
template <typename _Scalar, typename _Index>
template <typename _Scalar, typename _StorageIndex>
class MappedSuperNodalMatrix
{
public:
typedef _Scalar Scalar;
typedef _Index Index;
typedef Matrix<Index,Dynamic,1> IndexVector;
typedef _StorageIndex StorageIndex;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
public:
MappedSuperNodalMatrix()
{

}
MappedSuperNodalMatrix(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
MappedSuperNodalMatrix(StorageIndex m, StorageIndex n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
{
setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col);
@ -58,7 +58,7 @@ class MappedSuperNodalMatrix
* FIXME This class will be modified such that it can be use in the course
* of the factorization.
*/
void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
void setInfos(StorageIndex m, StorageIndex n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
{
m_row = m;
@ -75,12 +75,12 @@ class MappedSuperNodalMatrix
/**
* Number of rows
*/
Index rows() { return m_row; }
StorageIndex rows() { return m_row; }

/**
* Number of columns
*/
Index cols() { return m_col; }
StorageIndex cols() { return m_col; }

/**
* Return the array of nonzero values packed by column
@ -96,12 +96,12 @@ class MappedSuperNodalMatrix
/**
* Return the pointers to the beginning of each column in \ref valuePtr()
*/
Index* colIndexPtr()
StorageIndex* colIndexPtr()
{
return m_nzval_colptr;
}

const Index* colIndexPtr() const
const StorageIndex* colIndexPtr() const
{
return m_nzval_colptr;
}
@ -109,9 +109,9 @@ class MappedSuperNodalMatrix
/**
* Return the array of compressed row indices of all supernodes
*/
Index* rowIndex() { return m_rowind; }
StorageIndex* rowIndex() { return m_rowind; }

const Index* rowIndex() const
const StorageIndex* rowIndex() const
{
return m_rowind;
}
@ -119,9 +119,9 @@ class MappedSuperNodalMatrix
/**
* Return the location in \em rowvaluePtr() which starts each column
*/
Index* rowIndexPtr() { return m_rowind_colptr; }
StorageIndex* rowIndexPtr() { return m_rowind_colptr; }

const Index* rowIndexPtr() const
const StorageIndex* rowIndexPtr() const
{
return m_rowind_colptr;
}
@ -129,18 +129,18 @@ class MappedSuperNodalMatrix
/**
* Return the array of column-to-supernode mapping
*/
Index* colToSup() { return m_col_to_sup; }
StorageIndex* colToSup() { return m_col_to_sup; }

const Index* colToSup() const
const StorageIndex* colToSup() const
{
return m_col_to_sup;
}
/**
* Return the array of supernode-to-column mapping
*/
Index* supToCol() { return m_sup_to_col; }
StorageIndex* supToCol() { return m_sup_to_col; }

const Index* supToCol() const
const StorageIndex* supToCol() const
{
return m_sup_to_col;
}
@ -148,7 +148,7 @@ class MappedSuperNodalMatrix
/**
* Return the number of supernodes
*/
Index nsuper() const
StorageIndex nsuper() const
{
return m_nsuper;
}
@ -161,15 +161,15 @@ class MappedSuperNodalMatrix

protected:
Index m_row; // Number of rows
Index m_col; // Number of columns
Index m_nsuper; // Number of supernodes
StorageIndex m_row; // Number of rows
StorageIndex m_col; // Number of columns
StorageIndex m_nsuper; // Number of supernodes
Scalar* m_nzval; //array of nonzero values packed by column
Index* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j
Index* m_rowind; // Array of compressed row indices of rectangular supernodes
Index* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j
Index* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs
Index* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode
StorageIndex* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j
StorageIndex* m_rowind; // Array of compressed row indices of rectangular supernodes
StorageIndex* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j
StorageIndex* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs
StorageIndex* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode

private :
};
@ -182,9 +182,9 @@ template<typename Scalar, typename Index>
class MappedSuperNodalMatrix<Scalar,Index>::InnerIterator
{
public:
InnerIterator(const MappedSuperNodalMatrix& mat, Index outer)
InnerIterator(const MappedSuperNodalMatrix& mat, Eigen::Index outer)
: m_matrix(mat),
m_outer(outer),
m_outer(convert_index<Index>(outer)),
m_supno(mat.colToSup()[outer]),
m_idval(mat.colIndexPtr()[outer]),
m_startidval(m_idval),
@ -229,14 +229,14 @@ class MappedSuperNodalMatrix<Scalar,Index>::InnerIterator
* \brief Solve with the supernode triangular matrix
*
*/
template<typename Scalar, typename Index>
template<typename Scalar, typename Index_>
template<typename Dest>
void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) const
void MappedSuperNodalMatrix<Scalar,Index_>::solveInPlace( MatrixBase<Dest>&X) const
{
/* Explicit type conversion as the Index type of MatrixBase<Dest> may be wider than Index */
eigen_assert(X.rows() <= NumTraits<Index>::highest());
eigen_assert(X.cols() <= NumTraits<Index>::highest());
Index n = Index(X.rows());
// eigen_assert(X.rows() <= NumTraits<Index>::highest());
// eigen_assert(X.cols() <= NumTraits<Index>::highest());
Index n = int(X.rows());
Index nrhs = Index(X.cols());
const Scalar * Lval = valuePtr(); // Nonzero values
Matrix<Scalar,Dynamic,Dynamic> work(n, nrhs); // working vector

@ -21,7 +21,7 @@ namespace internal {
template <typename SparseQRType> struct traits<SparseQRMatrixQReturnType<SparseQRType> >
{
typedef typename SparseQRType::MatrixType ReturnType;
typedef typename ReturnType::Index Index;
typedef typename ReturnType::StorageIndex StorageIndex;
typedef typename ReturnType::StorageKind StorageKind;
};
template <typename SparseQRType> struct traits<SparseQRMatrixQTransposeReturnType<SparseQRType> >
@ -73,11 +73,11 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
typedef _OrderingType OrderingType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef SparseMatrix<Scalar,ColMajor,Index> QRMatrixType;
typedef Matrix<Index, Dynamic, 1> IndexVector;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> QRMatrixType;
typedef Matrix<StorageIndex, Dynamic, 1> IndexVector;
typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
public:
SparseQR () : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{ }
@ -109,11 +109,11 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >

/** \returns the number of rows of the represented matrix.
*/
inline Index rows() const { return m_pmat.rows(); }
inline StorageIndex rows() const { return m_pmat.rows(); }

/** \returns the number of columns of the represented matrix.
*/
inline Index cols() const { return m_pmat.cols();}
inline StorageIndex cols() const { return m_pmat.cols();}

/** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization.
*/
@ -123,7 +123,7 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
*
* \sa setPivotThreshold()
*/
Index rank() const
StorageIndex rank() const
{
eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
return m_nonzeropivots;
@ -179,7 +179,7 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
b = y;

// Solve with the triangular matrix R
y.resize((std::max)(cols(),Index(y.rows())),y.cols());
y.resize((std::max<Index>)(cols(),y.rows()),y.cols());
y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank));
y.bottomRows(y.rows()-rank).setZero();

@ -260,7 +260,7 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
PermutationType m_outputPerm_c; // The final column permutation
RealScalar m_threshold; // Threshold to determine null Householder reflections
bool m_useDefaultThreshold; // Use default threshold
Index m_nonzeropivots; // Number of non zero pivots found
StorageIndex m_nonzeropivots; // Number of non zero pivots found
IndexVector m_etree; // Column elimination tree
IndexVector m_firstRowElt; // First element in each row
bool m_isQSorted; // whether Q is sorted or not
@ -289,9 +289,9 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
// Compute the column fill reducing ordering
OrderingType ord;
ord(matCpy, m_perm_c);
Index n = mat.cols();
Index m = mat.rows();
Index diagSize = (std::min)(m,n);
StorageIndex n = mat.cols();
StorageIndex m = mat.rows();
StorageIndex diagSize = (std::min)(m,n);

if (!m_perm_c.size())
{
@ -354,7 +354,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// otherwise directly use the input matrix
//
IndexVector originalOuterIndicesCpy;
const Index *originalOuterIndices = mat.outerIndexPtr();
const StorageIndex *originalOuterIndices = mat.outerIndexPtr();
if(MatrixType::IsRowMajor)
{
originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1);
@ -385,11 +385,11 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// Initialize the numerical permutation
m_pivotperm.setIdentity(n);

Index nonzeroCol = 0; // Record the number of valid pivots
StorageIndex nonzeroCol = 0; // Record the number of valid pivots
m_Q.startVec(0);

// Left looking rank-revealing QR factorization: compute a column of R and Q at a time
for (Index col = 0; col < n; ++col)
for (StorageIndex col = 0; col < n; ++col)
{
mark.setConstant(-1);
m_R.startVec(col);
@ -405,12 +405,12 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found.
for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
{
Index curIdx = nonzeroCol;
StorageIndex curIdx = nonzeroCol;
if(itp) curIdx = itp.row();
if(curIdx == nonzeroCol) found_diag = true;

// Get the nonzeros indexes of the current column of R
Index st = m_firstRowElt(curIdx); // The traversal of the etree starts here
StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here
if (st < 0 )
{
m_lastError = "Empty row found during numerical factorization";
@ -467,7 +467,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{
for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
{
Index iQ = itq.row();
StorageIndex iQ = itq.row();
if (mark(iQ) != col)
{
Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q,
@ -578,7 +578,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
{
typedef typename SparseQRType::QRMatrixType MatrixType;
typedef typename SparseQRType::Scalar Scalar;
typedef typename SparseQRType::Index Index;
typedef typename SparseQRType::StorageIndex StorageIndex;
// Get the references
SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) :
m_qr(qr),m_other(other),m_transpose(transpose) {}
@ -634,7 +634,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
template<typename SparseQRType>
struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<SparseQRType> >
{
typedef typename SparseQRType::Index Index;
typedef typename SparseQRType::StorageIndex StorageIndex;
typedef typename SparseQRType::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
explicit SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {}
@ -647,8 +647,8 @@ struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<Sp
{
return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
}
inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }
inline StorageIndex rows() const { return m_qr.rows(); }
inline StorageIndex cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }
// To use for operations with the transpose of Q
SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const
{

@ -156,10 +156,10 @@ struct SluMatrix : SuperMatrix
res.setScalarType<typename MatrixType::Scalar>();
res.Mtype = SLU_GE;

res.nrow = mat.rows();
res.ncol = mat.cols();
res.nrow = internal::convert_index<int>(mat.rows());
res.ncol = internal::convert_index<int>(mat.cols());

res.storage.lda = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride();
res.storage.lda = internal::convert_index<int>(MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride());
res.storage.values = (void*)(mat.data());
return res;
}
@ -298,7 +298,7 @@ class SuperLUBase : public SparseSolverBase<Derived>
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
@ -313,8 +313,8 @@ class SuperLUBase : public SparseSolverBase<Derived>
clearFactors();
}

inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline StorageIndex rows() const { return m_matrix.rows(); }
inline StorageIndex cols() const { return m_matrix.cols(); }

/** \returns a reference to the Super LU option object to configure the Super LU algorithms. */
inline superlu_options_t& options() { return m_sluOptions; }
@ -457,7 +457,7 @@ class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
typedef _MatrixType MatrixType;
typedef typename Base::Scalar Scalar;
typedef typename Base::RealScalar RealScalar;
typedef typename Base::Index Index;
typedef typename Base::StorageIndex StorageIndex;
typedef typename Base::IntRowVectorType IntRowVectorType;
typedef typename Base::IntColVectorType IntColVectorType;
typedef typename Base::LUMatrixType LUMatrixType;
@ -616,8 +616,8 @@ void SuperLU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest>
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");

const int size = m_matrix.rows();
const int rhsCols = b.cols();
const StorageIndex size = m_matrix.rows();
const Index rhsCols = b.cols();
eigen_assert(size==b.rows());

m_sluOptions.Trans = NOTRANS;

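The SuperLU hunks above route size conversions through internal::convert_index. As a hedged sketch of what that helper is assumed to do (the stand-in name below is hypothetical, not Eigen's implementation):

// Narrowing cast between index types; Eigen's version is assumed to check
// that the value fits in the destination type before casting.
template<typename To, typename From>
inline To convert_index_sketch(From i)   // hypothetical stand-in name
{
  // eigen_assert(i <= NumTraits<To>::highest());  // assumed range check
  return static_cast<To>(i);
}
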
@ -141,7 +141,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
@ -164,8 +164,8 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
}

inline Index rows() const { return m_copyMatrix.rows(); }
inline Index cols() const { return m_copyMatrix.cols(); }
inline StorageIndex rows() const { return m_copyMatrix.rows(); }
inline StorageIndex cols() const { return m_copyMatrix.cols(); }

/** \brief Reports whether previous computation was successful.
*
@ -279,7 +279,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
void grapInput_impl(const InputMatrixType& mat, internal::true_type)
{
m_copyMatrix.resize(mat.rows(), mat.cols());
if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() )
if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::StorageIndex)!=sizeof(int) || !mat.isCompressed() )
{
// non supported input -> copy
m_copyMatrix = mat;
@ -397,7 +397,7 @@ template<typename MatrixType>
template<typename BDerived,typename XDerived>
bool UmfPackLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const
{
const int rhsCols = b.cols();
Index rhsCols = b.cols();
eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet");
eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
eigen_assert(b.derived().data() != x.derived().data() && " Umfpack does not support inplace solve");

@ -11,7 +11,6 @@

template<typename MatrixType> void bandmatrix(const MatrixType& _m)
{
typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrixType;
@ -62,8 +61,6 @@ using Eigen::internal::BandMatrix;

void test_bandmatrix()
{
typedef BandMatrix<float>::Index Index;

for(int i = 0; i < 10*g_repeat ; i++) {
Index rows = internal::random<Index>(1,10);
Index cols = internal::random<Index>(1,10);

test/main.h
@ -373,11 +373,10 @@ bool test_is_equal(const T& actual, const U& expected)
*/
// Forward declaration to avoid ICC warning
template<typename MatrixType>
void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typename MatrixType::Index rows, typename MatrixType::Index cols, MatrixType& m);
void createRandomPIMatrixOfRank(Index desired_rank, Index rows, Index cols, MatrixType& m);
template<typename MatrixType>
void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typename MatrixType::Index rows, typename MatrixType::Index cols, MatrixType& m)
void createRandomPIMatrixOfRank(Index desired_rank, Index rows, Index cols, MatrixType& m)
{
typedef typename internal::traits<MatrixType>::Index Index;
typedef typename internal::traits<MatrixType>::Scalar Scalar;
enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime };

@ -414,11 +413,10 @@ void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typenam

// Forward declaration to avoid ICC warning
template<typename PermutationVectorType>
void randomPermutationVector(PermutationVectorType& v, typename PermutationVectorType::Index size);
void randomPermutationVector(PermutationVectorType& v, Index size);
template<typename PermutationVectorType>
void randomPermutationVector(PermutationVectorType& v, typename PermutationVectorType::Index size)
void randomPermutationVector(PermutationVectorType& v, Index size)
{
typedef typename PermutationVectorType::Index Index;
typedef typename PermutationVectorType::Scalar Scalar;
v.resize(size);
for(Index i = 0; i < size; ++i) v(i) = Scalar(i);

@ -35,8 +35,8 @@ bool equalsIdentity(const MatrixType& A)
template<typename VectorType>
void testVectorType(const VectorType& base)
{
typedef typename internal::traits<VectorType>::Index Index;
typedef typename internal::traits<VectorType>::Scalar Scalar;
typedef typename VectorType::Index Index;
typedef typename VectorType::Scalar Scalar;

const Index size = base.size();

@ -22,7 +22,6 @@ template<typename MatrixType> void product(const MatrixType& m)
/* this test covers the following files:
Identity.h Product.h
*/
typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> RowVectorType;
typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> ColVectorType;

@ -13,11 +13,11 @@

template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& ref)
{
typedef typename SparseMatrixType::Index Index;
typedef Matrix<Index,2,1> Vector2;
typedef typename SparseMatrixType::StorageIndex StorageIndex;
typedef Matrix<StorageIndex,2,1> Vector2;

const Index rows = ref.rows();
const Index cols = ref.cols();
const StorageIndex rows = ref.rows();
const StorageIndex cols = ref.cols();
const Index inner = ref.innerSize();
const Index outer = ref.outerSize();

@ -56,27 +56,27 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
VERIFY_IS_APPROX(m, refMat);

// test InnerIterators and Block expressions
for (int t=0; t<10; ++t)
for (Index t=0; t<10; ++t)
{
int j = internal::random<int>(0,cols-1);
int i = internal::random<int>(0,rows-1);
int w = internal::random<int>(1,cols-j-1);
int h = internal::random<int>(1,rows-i-1);
Index j = internal::random<Index>(0,cols-1);
Index i = internal::random<Index>(0,rows-1);
Index w = internal::random<Index>(1,cols-j-1);
Index h = internal::random<Index>(1,rows-i-1);

VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w));
for(int c=0; c<w; c++)
for(Index c=0; c<w; c++)
{
VERIFY_IS_APPROX(m.block(i,j,h,w).col(c), refMat.block(i,j,h,w).col(c));
for(int r=0; r<h; r++)
for(Index r=0; r<h; r++)
{
// FIXME col().coeff() not implemented yet
// VERIFY_IS_APPROX(m.block(i,j,h,w).col(c).coeff(r), refMat.block(i,j,h,w).col(c).coeff(r));
}
}
for(int r=0; r<h; r++)
for(Index r=0; r<h; r++)
{
VERIFY_IS_APPROX(m.block(i,j,h,w).row(r), refMat.block(i,j,h,w).row(r));
for(int c=0; c<w; c++)
for(Index c=0; c<w; c++)
{
// FIXME row().coeff() not implemented yet
// VERIFY_IS_APPROX(m.block(i,j,h,w).row(r).coeff(c), refMat.block(i,j,h,w).row(r).coeff(c));
@ -84,13 +84,13 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
}
}

for(int c=0; c<cols; c++)
for(Index c=0; c<cols; c++)
{
VERIFY_IS_APPROX(m.col(c) + m.col(c), (m + m).col(c));
VERIFY_IS_APPROX(m.col(c) + m.col(c), refMat.col(c) + refMat.col(c));
}

for(int r=0; r<rows; r++)
for(Index r=0; r<rows; r++)
{
VERIFY_IS_APPROX(m.row(r) + m.row(r), (m + m).row(r));
VERIFY_IS_APPROX(m.row(r) + m.row(r), refMat.row(r) + refMat.row(r));
@ -153,7 +153,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
SparseMatrixType m2(rows,cols);
VectorXi r(VectorXi::Constant(m2.outerSize(), ((mode%2)==0) ? int(m2.innerSize()) : std::max<int>(1,int(m2.innerSize())/8)));
m2.reserve(r);
for (int k=0; k<rows*cols; ++k)
for (Index k=0; k<rows*cols; ++k)
{
Index i = internal::random<Index>(0,rows-1);
Index j = internal::random<Index>(0,cols-1);
@ -390,7 +390,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re

// test setFromTriplets
{
typedef Triplet<Scalar,Index> TripletType;
typedef Triplet<Scalar,StorageIndex> TripletType;
std::vector<TripletType> triplets;
Index ntriplets = rows*cols;
triplets.reserve(ntriplets);
@ -398,8 +398,8 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
refMat.setZero();
for(Index i=0;i<ntriplets;++i)
{
Index r = internal::random<Index>(0,rows-1);
Index c = internal::random<Index>(0,cols-1);
StorageIndex r = internal::random<StorageIndex>(0,rows-1);
StorageIndex c = internal::random<StorageIndex>(0,cols-1);
Scalar v = internal::random<Scalar>();
triplets.push_back(TripletType(r,c,v));
refMat(r,c) += v;
@ -482,17 +482,17 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re

// test conservative resize
{
std::vector< std::pair<Index,Index> > inc;
std::vector< std::pair<StorageIndex,StorageIndex> > inc;
if(rows > 3 && cols > 2)
inc.push_back(std::pair<Index,Index>(-3,-2));
inc.push_back(std::pair<Index,Index>(0,0));
inc.push_back(std::pair<Index,Index>(3,2));
inc.push_back(std::pair<Index,Index>(3,0));
inc.push_back(std::pair<Index,Index>(0,3));
inc.push_back(std::pair<StorageIndex,StorageIndex>(-3,-2));
inc.push_back(std::pair<StorageIndex,StorageIndex>(0,0));
inc.push_back(std::pair<StorageIndex,StorageIndex>(3,2));
inc.push_back(std::pair<StorageIndex,StorageIndex>(3,0));
inc.push_back(std::pair<StorageIndex,StorageIndex>(0,3));

for(size_t i = 0; i< inc.size(); i++) {
Index incRows = inc[i].first;
Index incCols = inc[i].second;
StorageIndex incRows = inc[i].first;
StorageIndex incCols = inc[i].second;
SparseMatrixType m1(rows, cols);
DenseMatrix refMat1 = DenseMatrix::Zero(rows, cols);
initSparse<Scalar>(density, refMat1, m1);
@ -527,28 +527,28 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re

template<typename SparseMatrixType>
void big_sparse_triplet(typename SparseMatrixType::Index rows, typename SparseMatrixType::Index cols, double density) {
typedef typename SparseMatrixType::Index Index;
typedef typename SparseMatrixType::Scalar Scalar;
typedef Triplet<Scalar,Index> TripletType;
std::vector<TripletType> triplets;
double nelements = density * rows*cols;
VERIFY(nelements>=0 && nelements < NumTraits<Index>::highest());
Index ntriplets = Index(nelements);
triplets.reserve(ntriplets);
Scalar sum = Scalar(0);
for(Index i=0;i<ntriplets;++i)
{
Index r = internal::random<Index>(0,rows-1);
Index c = internal::random<Index>(0,cols-1);
Scalar v = internal::random<Scalar>();
triplets.push_back(TripletType(r,c,v));
sum += v;
}
SparseMatrixType m(rows,cols);
m.setFromTriplets(triplets.begin(), triplets.end());
VERIFY(m.nonZeros() <= ntriplets);
VERIFY_IS_APPROX(sum, m.sum());
void big_sparse_triplet(Index rows, Index cols, double density) {
typedef typename SparseMatrixType::StorageIndex StorageIndex;
typedef typename SparseMatrixType::Scalar Scalar;
typedef Triplet<Scalar,Index> TripletType;
std::vector<TripletType> triplets;
double nelements = density * rows*cols;
VERIFY(nelements>=0 && nelements < NumTraits<StorageIndex>::highest());
Index ntriplets = Index(nelements);
triplets.reserve(ntriplets);
Scalar sum = Scalar(0);
for(Index i=0;i<ntriplets;++i)
{
Index r = internal::random<Index>(0,rows-1);
Index c = internal::random<Index>(0,cols-1);
Scalar v = internal::random<Scalar>();
triplets.push_back(TripletType(r,c,v));
sum += v;
}
SparseMatrixType m(rows,cols);
m.setFromTriplets(triplets.begin(), triplets.end());
VERIFY(m.nonZeros() <= ntriplets);
VERIFY_IS_APPROX(sum, m.sum());
}

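A minimal sketch of the Index/StorageIndex split these tests exercise, assuming only the SparseMatrix iterator API visible in the hunks above:

// Counts such as outerSize() travel as the global Index; values read back
// from the stored index arrays have the matrix's StorageIndex type.
#include <Eigen/SparseCore>

template<typename SparseMatrixType>
void index_types_sketch(const SparseMatrixType& m)
{
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  for (Eigen::Index k = 0; k < m.outerSize(); ++k)
    for (typename SparseMatrixType::InnerIterator it(m, k); it; ++it)
    {
      StorageIndex i = it.index();   // stored index: the narrow type
      (void)i;                       // silence unused-variable warnings
    }
}
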
@ -11,15 +11,13 @@

template<int OtherStorage, typename SparseMatrixType> void sparse_permutations(const SparseMatrixType& ref)
{
typedef typename SparseMatrixType::Index Index;

const Index rows = ref.rows();
const Index cols = ref.cols();
typedef typename SparseMatrixType::Scalar Scalar;
typedef typename SparseMatrixType::Index Index;
typedef SparseMatrix<Scalar, OtherStorage, Index> OtherSparseMatrixType;
typedef typename SparseMatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar, OtherStorage, StorageIndex> OtherSparseMatrixType;
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<Index,Dynamic,1> VectorI;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;

double density = (std::max)(8./(rows*cols), 0.01);

@ -11,7 +11,7 @@

template<typename SparseMatrixType> void sparse_product()
{
typedef typename SparseMatrixType::Index Index;
typedef typename SparseMatrixType::StorageIndex Index;
Index n = 100;
const Index rows = internal::random<Index>(1,n);
const Index cols = internal::random<Index>(1,n);

@ -15,7 +15,7 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A,
{
typedef typename Solver::MatrixType Mat;
typedef typename Mat::Scalar Scalar;
typedef typename Mat::Index Index;
typedef typename Mat::StorageIndex StorageIndex;

DenseRhs refX = dA.lu().solve(db);
{
@ -60,7 +60,7 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A,

x.setZero();
// test with Map
MappedSparseMatrix<Scalar,Mat::Options,Index> Am(A.rows(), A.cols(), A.nonZeros(), const_cast<Index*>(A.outerIndexPtr()), const_cast<Index*>(A.innerIndexPtr()), const_cast<Scalar*>(A.valuePtr()));
MappedSparseMatrix<Scalar,Mat::Options,StorageIndex> Am(A.rows(), A.cols(), A.nonZeros(), const_cast<StorageIndex*>(A.outerIndexPtr()), const_cast<StorageIndex*>(A.innerIndexPtr()), const_cast<Scalar*>(A.valuePtr()));
solver.compute(Am);
if (solver.info() != Success)
{
@ -95,7 +95,7 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A,
// test uncompressed inputs
{
Mat A2 = A;
A2.reserve((ArrayXf::Random(A.outerSize())+2).template cast<typename Mat::Index>().eval());
A2.reserve((ArrayXf::Random(A.outerSize())+2).template cast<typename Mat::StorageIndex>().eval());
solver.compute(A2);
Rhs x = solver.solve(b);
VERIFY(x.isApprox(refX,test_precision<Scalar>()));

@ -284,7 +284,7 @@ public:
using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;

@ -220,7 +220,7 @@ namespace Eigen {
using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;

@ -31,7 +31,7 @@ class KroneckerProductBase : public ReturnByValue<Derived>
|
||||
protected:
|
||||
typedef typename Traits::Lhs Lhs;
|
||||
typedef typename Traits::Rhs Rhs;
|
||||
typedef typename Traits::Index Index;
|
||||
typedef typename Traits::StorageIndex StorageIndex;
|
||||
|
||||
public:
|
||||
/*! \brief Constructor. */
|
||||
@ -39,8 +39,8 @@ class KroneckerProductBase : public ReturnByValue<Derived>
|
||||
: m_A(A), m_B(B)
|
||||
{}
|
||||
|
||||
inline Index rows() const { return m_A.rows() * m_B.rows(); }
|
||||
inline Index cols() const { return m_A.cols() * m_B.cols(); }
|
||||
inline StorageIndex rows() const { return m_A.rows() * m_B.rows(); }
|
||||
inline StorageIndex cols() const { return m_A.cols() * m_B.cols(); }
|
||||
|
||||
/*!
|
||||
* This overrides ReturnByValue::coeff because this function is
|
||||
@ -48,8 +48,8 @@ class KroneckerProductBase : public ReturnByValue<Derived>
|
||||
*/
|
||||
Scalar coeff(Index row, Index col) const
|
||||
{
|
||||
return m_A.coeff(typename Lhs::Index(row / m_B.rows()), typename Lhs::Index(col / m_B.cols())) *
|
||||
m_B.coeff(typename Rhs::Index(row % m_B.rows()), typename Rhs::Index(col % m_B.cols()));
|
||||
return m_A.coeff(row / m_B.rows(), col / m_B.cols()) *
|
||||
m_B.coeff(row % m_B.rows(), col % m_B.cols());
|
||||
}
|
||||
|
||||
/*!
|
||||
@ -59,7 +59,7 @@ class KroneckerProductBase : public ReturnByValue<Derived>
|
||||
Scalar coeff(Index i) const
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
|
||||
return m_A.coeff(typename Lhs::Index(i / m_A.size())) * m_B.coeff(typename Rhs::Index(i % m_A.size()));
|
||||
return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size());
|
||||
}
|
||||
|
||||
protected:
|
||||
@ -134,7 +134,6 @@ template<typename Lhs, typename Rhs>
template<typename Dest>
void KroneckerProduct<Lhs,Rhs>::evalTo(Dest& dst) const
{
typedef typename Base::Index Index;
const int BlockRows = Rhs::RowsAtCompileTime,
BlockCols = Rhs::ColsAtCompileTime;
const Index Br = m_B.rows(),
@ -148,12 +147,8 @@ template<typename Lhs, typename Rhs>
template<typename Dest>
void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
{
typedef typename Dest::Index DestIndex;
const typename Rhs::Index Br = m_B.rows(),
Bc = m_B.cols();
eigen_assert(this->rows() <= NumTraits<DestIndex>::highest());
eigen_assert(this->cols() <= NumTraits<DestIndex>::highest());
dst.resize(DestIndex(this->rows()), DestIndex(this->cols()));
Index Br = m_B.rows(), Bc = m_B.cols();
dst.resize(this->rows(), this->cols());
dst.resizeNonZeros(0);

// 1 - evaluate the operands if needed:
@ -170,13 +165,14 @@ void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const

// compute number of non-zeros per innervectors of dst
{
// TODO VectorXi is not necessarily big enough!
VectorXi nnzA = VectorXi::Zero(Dest::IsRowMajor ? m_A.rows() : m_A.cols());
for (typename Lhs::Index kA=0; kA < m_A.outerSize(); ++kA)
for (Index kA=0; kA < m_A.outerSize(); ++kA)
for (LhsInnerIterator itA(lhs1,kA); itA; ++itA)
nnzA(Dest::IsRowMajor ? itA.row() : itA.col())++;

VectorXi nnzB = VectorXi::Zero(Dest::IsRowMajor ? m_B.rows() : m_B.cols());
for (typename Rhs::Index kB=0; kB < m_B.outerSize(); ++kB)
for (Index kB=0; kB < m_B.outerSize(); ++kB)
for (RhsInnerIterator itB(rhs1,kB); itB; ++itB)
nnzB(Dest::IsRowMajor ? itB.row() : itB.col())++;
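The counting loops above are the first half of a classic two-pass sparse assembly: count nonzeros per inner vector of each operand, then reserve nnzA(i)*nnzB(j) slots per destination inner vector before inserting. The new TODO is worth noting: the counts accumulate in VectorXi, so an int can overflow for very large operands. The per-column counting idiom in isolation might look like this (a sketch assuming a column-major double matrix):

#include <Eigen/Sparse>
using namespace Eigen;

// Count nonzeros per column, as the nnzA/nnzB loops above do per operand.
VectorXi countPerColumn(const SparseMatrix<double>& M)
{
  VectorXi nnz = VectorXi::Zero(M.cols());
  for (Index k = 0; k < M.outerSize(); ++k)
    for (SparseMatrix<double>::InnerIterator it(M, k); it; ++it)
      nnz(it.col())++;
  return nnz;
}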
@ -184,17 +180,16 @@ void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
dst.reserve(VectorXi::Map(nnzAB.data(), nnzAB.size()));
}

for (typename Lhs::Index kA=0; kA < m_A.outerSize(); ++kA)
for (Index kA=0; kA < m_A.outerSize(); ++kA)
{
for (typename Rhs::Index kB=0; kB < m_B.outerSize(); ++kB)
for (Index kB=0; kB < m_B.outerSize(); ++kB)
{
for (LhsInnerIterator itA(lhs1,kA); itA; ++itA)
{
for (RhsInnerIterator itB(rhs1,kB); itB; ++itB)
{
const DestIndex
i = DestIndex(itA.row() * Br + itB.row()),
j = DestIndex(itA.col() * Bc + itB.col());
Index i = itA.row() * Br + itB.row(),
j = itA.col() * Bc + itB.col();
dst.insert(i,j) = itA.value() * itB.value();
}
}
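Each (itA, itB) pair above produces exactly one insertion, so the result holds nnz(A)*nnz(B) nonzeros. A hedged usage sketch of this sparse path through the public kroneckerProduct() entry point (operand sizes and values are illustrative):

#include <unsupported/Eigen/KroneckerProduct>
#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  SparseMatrix<double> A(4,4), B(5,5);
  A.insert(0,0) = 1.0; A.insert(2,3) = 2.0;
  B.insert(1,1) = 3.0;
  SparseMatrix<double> C = kroneckerProduct(A, B);   // 20x20 result
  // One inserted entry per pair of operand nonzeros:
  return C.nonZeros() == A.nonZeros() * B.nonZeros() ? 0 : 1;
}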
@ -210,7 +205,7 @@ struct traits<KroneckerProduct<_Lhs,_Rhs> >
typedef typename remove_all<_Lhs>::type Lhs;
typedef typename remove_all<_Rhs>::type Rhs;
typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
typedef typename promote_index_type<typename Lhs::Index, typename Rhs::Index>::type Index;
typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;

enum {
Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
@ -230,7 +225,7 @@ struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
typedef typename remove_all<_Rhs>::type Rhs;
typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind, scalar_product_op<typename Lhs::Scalar, typename Rhs::Scalar> >::ret StorageKind;
typedef typename promote_index_type<typename Lhs::Index, typename Rhs::Index>::type Index;
typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;

enum {
LhsFlags = Lhs::Flags,
@ -249,7 +244,7 @@ struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
CoeffReadCost = Dynamic
};

typedef SparseMatrix<Scalar, 0, Index> ReturnType;
typedef SparseMatrix<Scalar, 0, StorageIndex> ReturnType;
};

} // end namespace internal
|
@ -51,7 +51,7 @@ namespace Eigen {
* Dynamic : block size known at runtime
* a numeric number : fixed-size block known at compile time
*/
template<typename _Scalar, int _BlockAtCompileTime=Dynamic, int _Options=ColMajor, typename _Index=int> class BlockSparseMatrix;
template<typename _Scalar, int _BlockAtCompileTime=Dynamic, int _Options=ColMajor, typename _StorageIndex=int> class BlockSparseMatrix;

template<typename BlockSparseMatrixT> class BlockSparseMatrixView;

@ -280,14 +280,14 @@ class BlockSparseTimeDenseProduct
BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&);
};

template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _Index>
class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_BlockAtCompileTime, _Options,_Index> >
template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_BlockAtCompileTime, _Options,_StorageIndex> >
{
public:
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef _Index Index;
typedef typename internal::nested<BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index> >::type Nested;
typedef _StorageIndex StorageIndex;
typedef typename internal::nested<BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex> >::type Nested;

enum {
Options = _Options,
@ -303,7 +303,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
typedef Matrix<Scalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockScalar;
typedef Matrix<RealScalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockRealScalar;
typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType;
typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, Index> PlainObject;
typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, StorageIndex> PlainObject;
public:
// Default constructor
BlockSparseMatrix()
@ -412,17 +412,17 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
m_nonzeros = 0;

// First, compute the number of nonzero blocks and their locations
for(Index bj = 0; bj < m_outerBSize; ++bj)
for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
{
// Browse each outer block and compute the structure
std::vector<bool> nzblocksFlag(m_innerBSize,false); // Record the existing blocks
blockPattern.startVec(bj);
for(Index j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
{
typename MatrixType::InnerIterator it_spmat(spmat, j);
for(; it_spmat; ++it_spmat)
{
Index bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
if(!nzblocksFlag[bi])
{
// Save the index of this nonzero block
@ -439,21 +439,21 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
// Allocate the internal arrays
setBlockStructure(blockPattern);

for(Index nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
for(Index bj = 0; bj < m_outerBSize; ++bj)
for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
{
// Now copy the values
for(Index j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
{
// Browse the outer block column by column (for column-major matrices)
typename MatrixType::InnerIterator it_spmat(spmat, j);
for(; it_spmat; ++it_spmat)
{
Index idx = 0; // Position of this block in the column block
Index bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
StorageIndex idx = 0; // Position of this block in the column block
StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
// Go to the inner block where this element belongs to
while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx; // Not expensive for ordered blocks
Index idxVal;// Get the right position in the array of values for this element
StorageIndex idxVal;// Get the right position in the array of values for this element
if(m_blockSize == Dynamic)
{
// Offset from all blocks before ...
@ -503,8 +503,8 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
// Browse the block pattern and set up the various pointers
m_outerIndex[0] = 0;
if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
for(Index nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
for(Index bj = 0; bj < m_outerBSize; ++bj)
for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
{
//Browse each outer block

@ -519,9 +519,9 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
std::sort(nzBlockIdx.begin(), nzBlockIdx.end());

// Now, fill block indices and (eventually) pointers to blocks
for(Index idx = 0; idx < nzBlockIdx.size(); ++idx)
for(StorageIndex idx = 0; idx < nzBlockIdx.size(); ++idx)
{
Index offset = m_outerIndex[bj]+idx; // offset in m_indices
StorageIndex offset = m_outerIndex[bj]+idx; // offset in m_indices
m_indices[offset] = nzBlockIdx[idx];
if(m_blockSize == Dynamic)
m_blockPtr[offset] = m_blockPtr[offset-1] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj);
@ -535,7 +535,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
/**
* \brief Set the number of rows and columns blocks
*/
inline void resize(Index brow, Index bcol)
inline void resize(StorageIndex brow, StorageIndex bcol)
{
m_innerBSize = IsColMajor ? brow : bcol;
m_outerBSize = IsColMajor ? bcol : brow;
@ -546,7 +546,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
*
* Call this only for fixed-size blocks
*/
inline void setBlockSize(Index blockSize)
inline void setBlockSize(StorageIndex blockSize)
{
m_blockSize = blockSize;
}
@ -568,8 +568,8 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
m_outerBSize = outerBlocks.size();
// starting index of blocks... cumulative sums
m_innerOffset = new Index[m_innerBSize+1];
m_outerOffset = new Index[m_outerBSize+1];
m_innerOffset = new StorageIndex[m_innerBSize+1];
m_outerOffset = new StorageIndex[m_outerBSize+1];
m_innerOffset[0] = 0;
m_outerOffset[0] = 0;
std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]);
@ -577,8 +577,8 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo

// Compute the total number of nonzeros
m_nonzeros = 0;
for(Index bj = 0; bj < m_outerBSize; ++bj)
for(Index bi = 0; bi < m_innerBSize; ++bi)
for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
for(StorageIndex bi = 0; bi < m_innerBSize; ++bi)
m_nonzeros += outerBlocks[bj] * innerBlocks[bi];

}
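For variable-size blocks, the offset arrays built above are plain cumulative sums of the block sizes: block b starts at offset[b] and spans offset[b+1]-offset[b] scalars. The same computation on a toy input (sizes illustrative):

#include <numeric>
#include <vector>

int main()
{
  std::vector<int> blockSizes = {2, 3, 4};            // illustrative block sizes
  std::vector<int> offsets(blockSizes.size() + 1, 0);
  std::partial_sum(blockSizes.begin(), blockSizes.end(), offsets.begin() + 1);
  // offsets is now {0, 2, 5, 9}, mirroring m_innerOffset/m_outerOffset above.
  return offsets[3] == 9 ? 0 : 1;
}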
@ -593,13 +593,13 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
* is computed in setBlockLayout() for variable-size blocks
* \sa setBlockSize()
*/
inline void reserve(const Index nonzerosblocks)
inline void reserve(const StorageIndex nonzerosblocks)
{
eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) &&
"TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first");

//FIXME Should free if already allocated
m_outerIndex = new Index[m_outerBSize+1];
m_outerIndex = new StorageIndex[m_outerBSize+1];

m_nonzerosblocks = nonzerosblocks;
if(m_blockSize != Dynamic)
@ -610,9 +610,9 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
else
{
// m_nonzeros is already computed in setBlockLayout()
m_blockPtr = new Index[m_nonzerosblocks+1];
m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
}
m_indices = new Index[m_nonzerosblocks+1];
m_indices = new StorageIndex[m_nonzerosblocks+1];
m_values = new Scalar[m_nonzeros];
}

@ -669,7 +669,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
}
// Allocate member arrays
if(m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks);
Index nzblocks = nzblock_outer.sum();
StorageIndex nzblocks = nzblock_outer.sum();
reserve(nzblocks);

// Temporary markers
@ -678,7 +678,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
// Setup outer index pointers and markers
m_outerIndex[0] = 0;
if (m_blockSize == Dynamic) m_blockPtr[0] = 0;
for(Index bj = 0; bj < m_outerBSize; ++bj)
for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
{
m_outerIndex[bj+1] = m_outerIndex[bj] + nzblock_outer(bj);
block_id(bj) = m_outerIndex[bj];
@ -691,11 +691,11 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
// Fill the matrix
for(InputIterator it(begin); it!=end; ++it)
{
Index outer = IsColMajor ? it->col() : it->row();
Index inner = IsColMajor ? it->row() : it->col();
StorageIndex outer = IsColMajor ? it->col() : it->row();
StorageIndex inner = IsColMajor ? it->row() : it->col();
m_indices[block_id(outer)] = inner;
Index block_size = it->value().rows()*it->value().cols();
Index nz_marker = blockPtr(block_id[outer]);
StorageIndex block_size = it->value().rows()*it->value().cols();
StorageIndex nz_marker = blockPtr(block_id[outer]);
memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar));
if(m_blockSize == Dynamic)
{
@ -735,7 +735,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
/**
* \returns the number of rows
*/
inline Index rows() const
inline StorageIndex rows() const
{
// return blockRows();
return (IsColMajor ? innerSize() : outerSize());
@ -744,58 +744,58 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
/**
* \returns the number of cols
*/
inline Index cols() const
inline StorageIndex cols() const
{
// return blockCols();
return (IsColMajor ? outerSize() : innerSize());
}

inline Index innerSize() const
inline StorageIndex innerSize() const
{
if(m_blockSize == Dynamic) return m_innerOffset[m_innerBSize];
else return (m_innerBSize * m_blockSize) ;
}

inline Index outerSize() const
inline StorageIndex outerSize() const
{
if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize];
else return (m_outerBSize * m_blockSize) ;
}
/** \returns the number of rows grouped by blocks */
inline Index blockRows() const
inline StorageIndex blockRows() const
{
return (IsColMajor ? m_innerBSize : m_outerBSize);
}
/** \returns the number of columns grouped by blocks */
inline Index blockCols() const
inline StorageIndex blockCols() const
{
return (IsColMajor ? m_outerBSize : m_innerBSize);
}

inline Index outerBlocks() const { return m_outerBSize; }
inline Index innerBlocks() const { return m_innerBSize; }
inline StorageIndex outerBlocks() const { return m_outerBSize; }
inline StorageIndex innerBlocks() const { return m_innerBSize; }

/** \returns the block index where outer belongs to */
inline Index outerToBlock(Index outer) const
inline StorageIndex outerToBlock(StorageIndex outer) const
{
eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS");

if(m_blockSize != Dynamic)
return (outer / m_blockSize); // Integer division

Index b_outer = 0;
StorageIndex b_outer = 0;
while(m_outerOffset[b_outer] <= outer) ++b_outer;
return b_outer - 1;
}
/** \returns the block index where inner belongs to */
inline Index innerToBlock(Index inner) const
inline StorageIndex innerToBlock(StorageIndex inner) const
{
eigen_assert(inner < innerSize() && "OUTER INDEX OUT OF BOUNDS");

if(m_blockSize != Dynamic)
return (inner / m_blockSize); // Integer division

Index b_inner = 0;
StorageIndex b_inner = 0;
while(m_innerOffset[b_inner] <= inner) ++b_inner;
return b_inner - 1;
}
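Both lookups above reduce to an integer division for fixed-size blocks, and otherwise scan the offset array for the first entry past the scalar index. The variable-size branch in isolation (a sketch; callers are expected to assert the index is in range, as above):

#include <vector>

// Find the block whose [offsets[b], offsets[b+1]) range contains `scalar`,
// mirroring the dynamic branch of outerToBlock()/innerToBlock().
int scalarToBlock(const std::vector<int>& offsets, int scalar)
{
  int b = 0;
  while (offsets[b] <= scalar) ++b;
  return b - 1;
}

int main()
{
  std::vector<int> offsets = {0, 2, 5, 9};       // from the partial_sum sketch above
  return scalarToBlock(offsets, 4) == 1 ? 0 : 1; // scalar 4 lies in block 1
}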
@ -803,16 +803,16 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
/**
*\returns a reference to the (i,j) block as an Eigen Dense Matrix
*/
Ref<BlockScalar> coeffRef(Index brow, Index bcol)
Ref<BlockScalar> coeffRef(StorageIndex brow, StorageIndex bcol)
{
eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
eigen_assert(bcol < blockCols() && "BLOCK nzblocksFlagCOLUMN OUT OF BOUNDS");

Index rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
Index csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
Index inner = IsColMajor ? brow : bcol;
Index outer = IsColMajor ? bcol : brow;
Index offset = m_outerIndex[outer];
StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
StorageIndex inner = IsColMajor ? brow : bcol;
StorageIndex outer = IsColMajor ? bcol : brow;
StorageIndex offset = m_outerIndex[outer];
while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner)
offset++;
if(m_indices[offset] == inner)
@ -829,16 +829,16 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
/**
* \returns the value of the (i,j) block as an Eigen Dense Matrix
*/
Map<const BlockScalar> coeff(Index brow, Index bcol) const
Map<const BlockScalar> coeff(StorageIndex brow, StorageIndex bcol) const
{
eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS");

Index rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
Index csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
Index inner = IsColMajor ? brow : bcol;
Index outer = IsColMajor ? bcol : brow;
Index offset = m_outerIndex[outer];
StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
StorageIndex inner = IsColMajor ? brow : bcol;
StorageIndex outer = IsColMajor ? bcol : brow;
StorageIndex offset = m_outerIndex[outer];
while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++;
if(m_indices[offset] == inner)
{
@ -857,23 +857,23 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
}

/** \returns the number of nonzero blocks */
inline Index nonZerosBlocks() const { return m_nonzerosblocks; }
inline StorageIndex nonZerosBlocks() const { return m_nonzerosblocks; }
/** \returns the total number of nonzero elements, including eventual explicit zeros in blocks */
inline Index nonZeros() const { return m_nonzeros; }
inline StorageIndex nonZeros() const { return m_nonzeros; }

inline BlockScalarReturnType *valuePtr() {return static_cast<BlockScalarReturnType *>(m_values);}
// inline Scalar *valuePtr(){ return m_values; }
inline Index *innerIndexPtr() {return m_indices; }
inline const Index *innerIndexPtr() const {return m_indices; }
inline Index *outerIndexPtr() {return m_outerIndex; }
inline const Index* outerIndexPtr() const {return m_outerIndex; }
inline StorageIndex *innerIndexPtr() {return m_indices; }
inline const StorageIndex *innerIndexPtr() const {return m_indices; }
inline StorageIndex *outerIndexPtr() {return m_outerIndex; }
inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; }

/** \brief for compatibility purposes with the SparseMatrix class */
inline bool isCompressed() const {return true;}
/**
* \returns the starting index of the bi row block
*/
inline Index blockRowsIndex(Index bi) const
inline StorageIndex blockRowsIndex(StorageIndex bi) const
{
return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi);
}
@ -881,26 +881,26 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
/**
* \returns the starting index of the bj col block
*/
inline Index blockColsIndex(Index bj) const
inline StorageIndex blockColsIndex(Index bj) const
{
return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj);
}

inline Index blockOuterIndex(Index bj) const
inline StorageIndex blockOuterIndex(Index bj) const
{
return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize);
}
inline Index blockInnerIndex(Index bi) const
inline StorageIndex blockInnerIndex(Index bi) const
{
return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize);
}

// Not needed ???
inline Index blockInnerSize(Index bi) const
inline StorageIndex blockInnerSize(Index bi) const
{
return (m_blockSize == Dynamic) ? (m_innerOffset[bi+1] - m_innerOffset[bi]) : m_blockSize;
}
inline Index blockOuterSize(Index bj) const
inline StorageIndex blockOuterSize(Index bj) const
{
return (m_blockSize == Dynamic) ? (m_outerOffset[bj+1]- m_outerOffset[bj]) : m_blockSize;
}
@ -917,7 +917,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo

friend std::ostream & operator << (std::ostream & s, const BlockSparseMatrix& m)
{
for (Index j = 0; j < m.outerBlocks(); ++j)
for (StorageIndex j = 0; j < m.outerBlocks(); ++j)
{
BlockInnerIterator itb(m, j);
for(; itb; ++itb)
@ -933,7 +933,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
/**
* \returns the starting position of the block <id> in the array of values
*/
Index blockPtr(Index id) const
StorageIndex blockPtr(Index id) const
{
if(m_blockSize == Dynamic) return m_blockPtr[id];
else return id * m_blockSize * m_blockSize;
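The non-Dynamic branch of blockPtr() above relies on fixed-size blocks occupying blockSize*blockSize consecutive scalars in m_values, so block id starts at id*blockSize*blockSize; the dynamic case must read the precomputed m_blockPtr array instead. A toy check of the fixed-size arithmetic (numbers illustrative):

int main()
{
  // Fixed 3x3 blocks: block 0 -> values[0..8], block 1 -> values[9..17], ...
  int blockSize = 3, id = 2;
  int start = id * blockSize * blockSize;   // == 18, as blockPtr(2) would return
  return start == 18 ? 0 : 1;
}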
@ -955,21 +955,21 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
// Insert a block at a particular location... need to make a room for that
Map<BlockScalar> insert(Index brow, Index bcol);

Index m_innerBSize; // Number of block rows
Index m_outerBSize; // Number of block columns
Index *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1)
Index *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1)
Index m_nonzerosblocks; // Total nonzeros blocks (lower than m_innerBSize x m_outerBSize)
Index m_nonzeros; // Total nonzeros elements
StorageIndex m_innerBSize; // Number of block rows
StorageIndex m_outerBSize; // Number of block columns
StorageIndex *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1)
StorageIndex *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1)
StorageIndex m_nonzerosblocks; // Total nonzeros blocks (lower than m_innerBSize x m_outerBSize)
StorageIndex m_nonzeros; // Total nonzeros elements
Scalar *m_values; //Values stored block column after block column (size m_nonzeros)
Index *m_blockPtr; // Pointer to the beginning of each block in m_values, size m_nonzeroblocks ... null for fixed-size blocks
Index *m_indices; //Inner block indices, size m_nonzerosblocks ... OK
Index *m_outerIndex; // Starting pointer of each block column in m_indices (size m_outerBSize)... OK
Index m_blockSize; // Size of a block for fixed-size blocks, otherwise -1
StorageIndex *m_blockPtr; // Pointer to the beginning of each block in m_values, size m_nonzeroblocks ... null for fixed-size blocks
StorageIndex *m_indices; //Inner block indices, size m_nonzerosblocks ... OK
StorageIndex *m_outerIndex; // Starting pointer of each block column in m_indices (size m_outerBSize)... OK
StorageIndex m_blockSize; // Size of a block for fixed-size blocks, otherwise -1
};

template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _Index>
class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::BlockInnerIterator
template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator
{
public:

@ -977,7 +977,7 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::BlockIn
Flags = _Options
};

BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer)
BlockInnerIterator(const BlockSparseMatrix& mat, const StorageIndex outer)
: m_mat(mat),m_outer(outer),
m_id(mat.m_outerIndex[outer]),
m_end(mat.m_outerIndex[outer+1])
@ -997,27 +997,27 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::BlockIn
rows(),cols());
}
// Block inner index
inline Index index() const {return m_mat.m_indices[m_id]; }
inline Index outer() const { return m_outer; }
inline StorageIndex index() const {return m_mat.m_indices[m_id]; }
inline StorageIndex outer() const { return m_outer; }
// block row index
inline Index row() const {return index(); }
inline StorageIndex row() const {return index(); }
// block column index
inline Index col() const {return outer(); }
inline StorageIndex col() const {return outer(); }
// FIXME Number of rows in the current block
inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; }
inline StorageIndex rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; }
// Number of columns in the current block ...
inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;}
inline StorageIndex cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;}
inline operator bool() const { return (m_id < m_end); }

protected:
const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, Index>& m_mat;
const Index m_outer;
Index m_id;
Index m_end;
const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat;
const StorageIndex m_outer;
StorageIndex m_id;
StorageIndex m_end;
};

template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _Index>
class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::InnerIterator
template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator
{
public:
InnerIterator(const BlockSparseMatrix& mat, Index outer)
@ -1055,23 +1055,23 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::InnerIt
{
return itb.valueRef().coeff(m_id - m_start, m_offset);
}
inline Index index() const { return m_id; }
inline Index outer() const {return m_outer; }
inline Index col() const {return outer(); }
inline Index row() const { return index();}
inline StorageIndex index() const { return m_id; }
inline StorageIndex outer() const {return m_outer; }
inline StorageIndex col() const {return outer(); }
inline StorageIndex row() const { return index();}
inline operator bool() const
{
return itb;
}
protected:
const BlockSparseMatrix& m_mat;
const Index m_outer;
const Index m_outerB;
const StorageIndex m_outer;
const StorageIndex m_outerB;
BlockInnerIterator itb; // Iterator through the blocks
const Index m_offset; // Position of this column in the block
Index m_start; // starting inner index of this block
Index m_id; // current inner index in the block
Index m_end; // starting inner index of the next block
const StorageIndex m_offset; // Position of this column in the block
StorageIndex m_start; // starting inner index of this block
StorageIndex m_id; // current inner index in the block
StorageIndex m_end; // starting inner index of the next block

};
} // end namespace Eigen
|
@ -37,7 +37,7 @@ template<typename _Scalar, int _Options, typename _Index>
struct traits<DynamicSparseMatrix<_Scalar, _Options, _Index> >
{
typedef _Scalar Scalar;
typedef _Index Index;
typedef _Index StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
@ -70,21 +70,21 @@ template<typename _Scalar, int _Options, typename _Index>

protected:

typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0), StorageIndex> TransposedSparseMatrix;

Index m_innerSize;
std::vector<internal::CompressedStorage<Scalar,Index> > m_data;
StorageIndex m_innerSize;
std::vector<internal::CompressedStorage<Scalar,StorageIndex> > m_data;

public:

inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
inline Index innerSize() const { return m_innerSize; }
inline Index outerSize() const { return static_cast<Index>(m_data.size()); }
inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
inline StorageIndex rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
inline StorageIndex cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
inline StorageIndex innerSize() const { return m_innerSize; }
inline StorageIndex outerSize() const { return convert_index(m_data.size()); }
inline StorageIndex innerNonZeros(Index j) const { return m_data[j].size(); }

std::vector<internal::CompressedStorage<Scalar,Index> >& _data() { return m_data; }
const std::vector<internal::CompressedStorage<Scalar,Index> >& _data() const { return m_data; }
std::vector<internal::CompressedStorage<Scalar,StorageIndex> >& _data() { return m_data; }
const std::vector<internal::CompressedStorage<Scalar,StorageIndex> >& _data() const { return m_data; }

/** \returns the coefficient value at given position \a row, \a col
* This operation involes a log(rho*outer_size) binary search.
@ -117,11 +117,11 @@ template<typename _Scalar, int _Options, typename _Index>
}

/** \returns the number of non zero coefficients */
Index nonZeros() const
StorageIndex nonZeros() const
{
Index res = 0;
StorageIndex res = 0;
for (Index j=0; j<outerSize(); ++j)
res += static_cast<Index>(m_data[j].size());
res += convert_index(m_data[j].size());
return res;
}

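nonZeros() above now funnels the size_t returned by std::vector::size() through convert_index(), which narrows to StorageIndex; this kind of checked narrowing is one of the mechanisms used throughout the commit to silence Index conversion warnings. A hypothetical stand-in for what such a guard does (the real helper lives in Eigen's internal namespace and may differ in details):

#include <cassert>
#include <cstddef>
#include <limits>

// Illustrative stand-in for internal::convert_index<StorageIndex>(...):
// narrow to the storage type, asserting in debug builds that the value fits.
template<typename StorageIndex>
StorageIndex checkedNarrow(std::size_t n)
{
  assert(n <= static_cast<std::size_t>(std::numeric_limits<StorageIndex>::max()));
  return static_cast<StorageIndex>(n);
}

int main()
{
  return checkedNarrow<int>(std::size_t(42)) == 42 ? 0 : 1;
}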
@ -197,7 +197,7 @@ template<typename _Scalar, int _Options, typename _Index>
void resize(Index rows, Index cols)
{
const Index outerSize = IsRowMajor ? rows : cols;
m_innerSize = IsRowMajor ? cols : rows;
m_innerSize = convert_index(IsRowMajor ? cols : rows);
setZero();
if (Index(m_data.size()) != outerSize)
{
|
@ -154,7 +154,7 @@ template<typename SparseMatrixType,
class RandomSetter
{
typedef typename SparseMatrixType::Scalar Scalar;
typedef typename SparseMatrixType::Index Index;
typedef typename SparseMatrixType::StorageIndex StorageIndex;

struct ScalarWrapper
{
@ -292,10 +292,10 @@ class RandomSetter
/** \returns a reference to the coefficient at given coordinates \a row, \a col */
Scalar& operator() (Index row, Index col)
{
const Index outer = SetterRowMajor ? row : col;
const Index inner = SetterRowMajor ? col : row;
const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map
const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet
const StorageIndex outer = internal::convert_index<StorageIndex>(SetterRowMajor ? row : col);
const StorageIndex inner = internal::convert_index<StorageIndex>(SetterRowMajor ? col : row);
const StorageIndex outerMajor = outer >> OuterPacketBits; // index of the packet/map
const StorageIndex outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet
const KeyType key = (KeyType(outerMinor)<<m_keyBitsOffset) | inner;
return m_hashmaps[outerMajor][key].value;
}
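operator() above splits the outer index in two: outerMajor selects one of several hash maps, while the low bits (outerMinor) are packed above the inner index to form the hash key. The same packing with made-up bit widths (RandomSetter derives its actual OuterPacketBits differently):

#include <cstdint>

// Illustrative bit layout: 4 bits of outerMinor above a 28-bit inner index.
typedef std::uint32_t KeyType;
const int keyBitsOffset = 28;

KeyType makeKey(std::uint32_t outerMinor, std::uint32_t inner)
{
  return (KeyType(outerMinor) << keyBitsOffset) | inner;  // same shape as above
}

int main()
{
  // outerMajor (not shown) picks the hash map; the key only needs the
  // low bits of the outer index plus the full inner index.
  return makeKey(3u, 17u) == ((3u << 28) | 17u) ? 0 : 1;
}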
|
@ -24,7 +24,6 @@ struct processTriangularMatrix<MatrixType,0>
{
static void run(MatrixType& m, MatrixType& T, const MatrixType& U)
{
typedef typename MatrixType::Index Index;
const Index size = m.cols();

for (Index i=0; i < size; ++i) {
|
@ -49,7 +49,6 @@ bool test_random_setter(DynamicSparseMatrix<T>& sm, const DenseType& ref, const

template<typename SparseMatrixType> void sparse_extra(const SparseMatrixType& ref)
{
typedef typename SparseMatrixType::Index Index;
const Index rows = ref.rows();
const Index cols = ref.cols();
typedef typename SparseMatrixType::Scalar Scalar;