mirror of https://gitlab.com/libeigen/eigen.git
synced 2024-12-15 07:10:37 +08:00
bug #875: remove broken SparseMatrixBase::nonZeros and introduce a nonZerosEstimate() method to sparse evaluators for internal use.
Factorize some code in SparseCompressedBase.
parent 39dcd01b0a
commit 3105986e71
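In short: the generic SparseMatrixBase::nonZeros() could be wrong or arbitrarily expensive for expression types, so it is removed; exact counts remain on storage-backed classes (now factored into SparseCompressedBase), and internal product code instead asks the evaluator for a nonZerosEstimate(). The file paths shown before each group of hunks below are inferred from the classes and functions they touch. A minimal sketch of the resulting usage, assuming a recent Eigen where internal::evaluator<T> is directly constructible (the commit itself still spells it typename evaluator<T>::type):

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(4, 4);
      A.insert(0, 0) = 1.0;
      A.insert(2, 1) = 2.0;
      A.makeCompressed();

      // Exact count, still available on storage-backed types via SparseCompressedBase.
      std::cout << A.nonZeros() << "\n";              // prints 2

      // Internal code now queries the evaluator instead; the result may be an
      // estimate for expressions, but is exact for a plain SparseMatrix.
      Eigen::internal::evaluator<Eigen::SparseMatrix<double> > evalA(A);
      std::cout << evalA.nonZerosEstimate() << "\n";  // prints 2
    }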
Eigen/src/SparseCore/ConservativeSparseSparseProduct.h:

@@ -30,16 +30,16 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
   std::memset(mask,0,sizeof(bool)*rows);
 
+  typename evaluator<Lhs>::type lhsEval(lhs);
+  typename evaluator<Rhs>::type rhsEval(rhs);
+
   // estimate the number of non zero entries
   // given a rhs column containing Y non zeros, we assume that the respective Y columns
   // of the lhs differs in average of one non zeros, thus the number of non zeros for
   // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
   // per column of the lhs.
   // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
-  Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
-
-  typename evaluator<Lhs>::type lhsEval(lhs);
-  typename evaluator<Rhs>::type rhsEval(rhs);
+  Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
 
   res.setZero();
   res.reserve(Index(estimated_nnz_prod));
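To spell out the "Therefore" step in that comment: summing the per-column cost X+Y over all columns of the rhs gives X*cols(rhs) + nnz(rhs), and since X = nnz(lhs)/cols(lhs), the first term equals nnz(lhs) whenever cols(rhs) matches cols(lhs); hence nnz(lhs*rhs) ~ nnz(lhs) + nnz(rhs). For example, a 100x100 lhs with 500 non zeros (X = 5) multiplied by a rhs with 400 non zeros yields an estimate of 900.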
Eigen/src/SparseCore/SparseBlock.h:

@@ -90,7 +90,8 @@ class sparse_matrix_block_impl
     typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
 public:
     enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
-    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+    typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
+    _EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
 protected:
     typedef typename Base::IndexVector IndexVector;
     enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
@@ -198,20 +199,9 @@ public:
     { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; }
 
     inline const StorageIndex* innerNonZeroPtr() const
-    { return isCompressed() ? 0 : m_matrix.innerNonZeroPtr(); }
+    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
     inline StorageIndex* innerNonZeroPtr()
-    { return isCompressed() ? 0 : m_matrix.const_cast_derived().innerNonZeroPtr(); }
-
-    Index nonZeros() const
-    {
-      if(m_matrix.isCompressed())
-        return ( (m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
-               - (m_matrix.outerIndexPtr()[m_outerStart]));
-      else if(m_outerSize.value()==0)
-        return 0;
-      else
-        return Map<const IndexVector>(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
-    }
+    { return isCompressed() ? 0 : (m_matrix.const_cast_derived().innerNonZeroPtr()+m_outerStart); }
 
     bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
@@ -233,7 +223,7 @@ public:
     const Scalar& lastCoeff() const
     {
       EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
-      eigen_assert(nonZeros()>0);
+      eigen_assert(Base::nonZeros()>0);
       if(m_matrix.isCompressed())
         return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
       else
@@ -417,6 +407,9 @@ public:
   protected:
     friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
     friend class ReverseInnerIterator;
+    friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
+
+    Index nonZeros() const { return Dynamic; }
 
     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
@@ -548,9 +541,16 @@ struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBa
     explicit unary_evaluator(const XprType& op)
       : m_argImpl(op.nestedExpression()), m_block(op)
     {}
 
+    inline Index nonZerosEstimate() const {
+      Index nnz = m_block.nonZeros();
+      if(nnz<0)
+        return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
+      return nnz;
+    }
+
   protected:
     typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
 
     typename evaluator<ArgType>::nestedType m_argImpl;
     const XprType &m_block;
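Two block flavors meet here: blocks of an actual sparse matrix (sparse_matrix_block_impl) can count exactly through the factored-out Base::nonZeros(), while the generic BlockImpl now reports Dynamic (a negative sentinel) from its internal nonZeros(). The evaluator above covers both: an exact nonnegative count is returned as-is; otherwise the argument's estimate is scaled by the fraction of coefficients the block covers, e.g. a block spanning half of an expression estimated at 600 non zeros is estimated at 300.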
Eigen/src/SparseCore/SparseCompressedBase.h:

@@ -35,6 +35,25 @@ class SparseCompressedBase
     class InnerIterator;
     class ReverseInnerIterator;
 
+  protected:
+    typedef typename Base::IndexVector IndexVector;
+    Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
+    const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
+
+  public:
+
+    /** \returns the number of non zero coefficients */
+    inline Index nonZeros() const
+    {
+      if(isCompressed())
+        return outerIndexPtr()[derived().outerSize()]-outerIndexPtr()[0];
+      else if(derived().outerSize()==0)
+        return 0;
+      else
+        return innerNonZeros().sum();
+
+    }
+
     /** \returns a const pointer to the array of values.
       * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
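The two branches of the factored nonZeros() mirror the two storage layouts. A standalone illustration with invented numbers, using plain arrays instead of the Eigen members:

    // Compressed (CSC/CSR): the outer index array has outerSize()+1 entries and
    // entry k is the offset of the first stored value of outer unit k, so the
    // total count is the last entry minus the first.
    int outerIndex[5]    = {0, 2, 2, 5, 7};                 // 4 columns, 7 values
    int nnzCompressed    = outerIndex[4] - outerIndex[0];   // 7

    // Uncompressed: each outer unit additionally records how many of its
    // reserved slots are in use; the total is the sum of those counts.
    int innerNonZeros[4] = {2, 0, 3, 2};
    int nnzUncompressed  = innerNonZeros[0] + innerNonZeros[1]
                         + innerNonZeros[2] + innerNonZeros[3];  // 7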
@@ -165,6 +184,10 @@ struct evaluator<SparseCompressedBase<Derived> >
     evaluator() : m_matrix(0) {}
     explicit evaluator(const Derived &mat) : m_matrix(&mat) {}
 
+    inline Index nonZerosEstimate() const {
+      return m_matrix->nonZeros();
+    }
+
     operator Derived&() { return m_matrix->const_cast_derived(); }
     operator const Derived&() const { return *m_matrix; }
Eigen/src/SparseCore/SparseCwiseBinaryOp.h:

@@ -121,6 +121,10 @@ public:
       m_lhsImpl(xpr.lhs()),
       m_rhsImpl(xpr.rhs())
   { }
 
+  inline Index nonZerosEstimate() const {
+    return m_lhsImpl.nonZerosEstimate() + m_rhsImpl.nonZerosEstimate();
+  }
+
 protected:
   const BinaryOp m_functor;
@@ -198,6 +202,10 @@ public:
       m_lhsImpl(xpr.lhs()),
       m_rhsImpl(xpr.rhs())
   { }
 
+  inline Index nonZerosEstimate() const {
+    return (std::min)(m_lhsImpl.nonZerosEstimate(), m_rhsImpl.nonZerosEstimate());
+  }
+
 protected:
   const BinaryOp m_functor;
@@ -243,7 +251,7 @@ public:
   EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
 
   EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
 
 protected:
   const LhsEvaluator &m_lhsEval;
   RhsIterator m_rhsIter;
@@ -262,6 +270,10 @@ public:
       m_lhsImpl(xpr.lhs()),
       m_rhsImpl(xpr.rhs())
   { }
 
+  inline Index nonZerosEstimate() const {
+    return m_rhsImpl.nonZerosEstimate();
+  }
+
 protected:
   const BinaryOp m_functor;
@@ -308,7 +320,7 @@ public:
   EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
 
   EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
 
 protected:
   LhsIterator m_lhsIter;
   const RhsEvaluator &m_rhsEval;
@@ -327,6 +339,10 @@ public:
       m_lhsImpl(xpr.lhs()),
       m_rhsImpl(xpr.rhs())
   { }
 
+  inline Index nonZerosEstimate() const {
+    return m_lhsImpl.nonZerosEstimate();
+  }
+
 protected:
   const BinaryOp m_functor;
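Each binary evaluator picks the cheapest safe bound for its result pattern: a generic binary op (e.g. a sum) can touch the union of both patterns, so the estimates are added; a sparse-sparse conjunction (product-like op) is non zero only on the intersection, so the smaller estimate wins; and the mixed dense/sparse conjunctions simply inherit the estimate of their sparse operand. For instance, patterns {0,2} and {2,3} have a union of size 3, bounded by 2+2 = 4, and an intersection of size 1, bounded by min(2,2) = 2.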
Eigen/src/SparseCore/SparseCwiseUnaryOp.h:

@@ -30,6 +30,10 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
   };
 
   explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) {}
 
+  inline Index nonZerosEstimate() const {
+    return m_argImpl.nonZerosEstimate();
+  }
+
 protected:
   typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
Eigen/src/SparseCore/SparseMap.h:

@@ -105,9 +105,6 @@ class SparseMapBase<Derived,ReadOnlyAccessors>
       return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
     }
 
-    /** \returns the number of non zero coefficients */
-    inline Index nonZeros() const { return m_nnz; }
-
     inline SparseMapBase(Index rows, Index cols, Index nnz, IndexPointer outerIndexPtr, IndexPointer innerIndexPtr,
                          ScalarPointer valuePtr, IndexPointer innerNonZerosPtr = 0)
       : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
Eigen/src/SparseCore/SparseMatrix.h:

@@ -95,6 +95,7 @@ class SparseMatrix
   public:
     typedef SparseCompressedBase<SparseMatrix> Base;
     using Base::isCompressed;
+    using Base::nonZeros;
     _EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
@@ -122,9 +123,6 @@ class SparseMatrix
     StorageIndex* m_outerIndex;
     StorageIndex* m_innerNonZeros;     // optional, if null then the data is compressed
     Storage m_data;
 
-    Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
-    const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
-
   public:
 
@@ -252,14 +250,6 @@ class SparseMatrix
       memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
     }
 
-    /** \returns the number of non zero coefficients */
-    inline Index nonZeros() const
-    {
-      if(m_innerNonZeros)
-        return innerNonZeros().sum();
-      return convert_index(Index(m_data.size()));
-    }
-
     /** Preallocates \a reserveSize non zeros.
       *
       * Precondition: the matrix must be in compressed mode. */
Eigen/src/SparseCore/SparseMatrixBase.h:

@@ -149,9 +149,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
     /** \returns the number of coefficients, which is \a rows()*cols().
       * \sa rows(), cols(). */
     inline Index size() const { return rows() * cols(); }
-    /** \returns the number of nonzero coefficients which is in practice the number
-      * of stored coefficients. */
-    inline Index nonZeros() const { return derived().nonZeros(); }
     /** \returns true if either the number of rows or the number of columns is equal to 1.
       * In other words, this function returns
      * \code rows()==1 || cols()==1 \endcode
Eigen/src/SparseCore/SparseSparseProductWithPruning.h:

@@ -33,14 +33,6 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
   // allocate a temporary buffer
   AmbiVector<Scalar,StorageIndex> tempVector(rows);
 
-  // estimate the number of non zero entries
-  // given a rhs column containing Y non zeros, we assume that the respective Y columns
-  // of the lhs differs in average of one non zeros, thus the number of non zeros for
-  // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
-  // per column of the lhs.
-  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
-  Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
-
   // mimics a resizeByInnerOuter:
   if(ResultType::IsRowMajor)
     res.resize(cols, rows);
@@ -49,6 +41,14 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
 
   typename evaluator<Lhs>::type lhsEval(lhs);
   typename evaluator<Rhs>::type rhsEval(rhs);
 
+  // estimate the number of non zero entries
+  // given a rhs column containing Y non zeros, we assume that the respective Y columns
+  // of the lhs differs in average of one non zeros, thus the number of non zeros for
+  // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
+  // per column of the lhs.
+  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
+  Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
+
   res.reserve(estimated_nnz_prod);
   double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
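The pruning variant reuses the same estimate to size its temporaries: ratioColRes is the estimated density of the result (estimated non zeros divided by rows*cols), and the rest of the function (not shown here) uses it to tune the per-column AmbiVector buffer. With the example numbers given earlier (estimate 900 for a 100x100 result), ratioColRes = 900/10000 = 0.09.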
Eigen/src/SparseCore/SparseTranspose.h:

@@ -40,15 +40,11 @@ namespace internal {
   };
 }
 
-// Implement nonZeros() for transpose. I'm not sure that's the best approach for that.
-// Perhaps it should be implemented in Transpose<> itself.
 template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
   : public internal::SparseTransposeImpl<MatrixType>
 {
   protected:
     typedef internal::SparseTransposeImpl<MatrixType> Base;
-  public:
-    inline Index nonZeros() const { return Base::derived().nestedExpression().nonZeros(); }
 };
 
 namespace internal {
@@ -61,6 +57,10 @@ struct unary_evaluator<Transpose<ArgType>, IteratorBased>
     typedef typename evaluator<ArgType>::ReverseInnerIterator EvalReverseIterator;
   public:
     typedef Transpose<ArgType> XprType;
 
+    inline Index nonZerosEstimate() const {
+      return m_argImpl.nonZerosEstimate();
+    }
+
     class InnerIterator : public EvalIterator
     {
Eigen/src/SparseCore/SparseTriangularView.h:

@@ -50,13 +50,6 @@ protected:
 
     template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
     template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
 
-    inline Index nonZeros() const {
-      // FIXME HACK number of nonZeros is required for product logic
-      // this returns only an upper bound (but should be OK for most purposes)
-      return derived().nestedExpression().nonZeros();
-    }
-
 
 };
@@ -191,6 +184,10 @@ public:
 
   explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()) {}
 
+  inline Index nonZerosEstimate() const {
+    return m_argImpl.nonZerosEstimate();
+  }
+
   class InnerIterator : public EvalIterator
   {
     typedef EvalIterator Base;
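As with the removed nonZeros() hack, the triangular evaluator's estimate is only an upper bound: it forwards the nested expression's estimate even though, for a strictly triangular selection, only about half of those coefficients fall inside the view. The removed FIXME's observation that an upper bound suffices for product logic carries over unchanged.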
Eigen/src/SparseCore/SparseVector.h:

@@ -442,6 +442,10 @@ struct evaluator<SparseVector<_Scalar,_Options,_Index> >
 
   explicit evaluator(const SparseVectorType &mat) : m_matrix(mat) {}
 
+  inline Index nonZerosEstimate() const {
+    return m_matrix.nonZeros();
+  }
+
   operator SparseVectorType&() { return m_matrix.const_cast_derived(); }
   operator const SparseVectorType&() const { return m_matrix; }
test/sparse_product.cpp:

@@ -67,6 +67,9 @@ template<typename SparseMatrixType> void sparse_product()
   VERIFY_IS_APPROX(m4 = m2*m3/s1, refMat4 = refMat2*refMat3/s1);
   VERIFY_IS_APPROX(m4 = m2*m3*s1, refMat4 = refMat2*refMat3*s1);
   VERIFY_IS_APPROX(m4 = s2*m2*m3*s1, refMat4 = s2*refMat2*refMat3*s1);
+  VERIFY_IS_APPROX(m4 = (m2+m2)*m3, refMat4 = (refMat2+refMat2)*refMat3);
+  VERIFY_IS_APPROX(m4 = m2*m3.leftCols(cols/2), refMat4 = refMat2*refMat3.leftCols(cols/2));
+  VERIFY_IS_APPROX(m4 = m2*(m3+m3).leftCols(cols/2), refMat4 = refMat2*(refMat3+refMat3).leftCols(cols/2));
 
   VERIFY_IS_APPROX(m4=(m2*m3).pruned(0), refMat4=refMat2*refMat3);
   VERIFY_IS_APPROX(m4=(m2t.transpose()*m3).pruned(0), refMat4=refMat2t.transpose()*refMat3);
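The three new checks exercise precisely the evaluator paths introduced above: (m2+m2)*m3 routes the product's size estimate through the binary-op evaluator, and the leftCols(cols/2) operands route it through the block evaluator, both paths where the old nonZeros() forwarding was broken.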
@@ -194,7 +197,7 @@ template<typename SparseMatrixType> void sparse_product()
     VERIFY_IS_APPROX(d3=d1*m2.transpose(), refM3=d1*refM2.transpose());
   }
 
-  // test self-adjoint and traingular-view products
+  // test self-adjoint and triangular-view products
   {
     DenseMatrix b = DenseMatrix::Random(rows, rows);
     DenseMatrix x = DenseMatrix::Random(rows, rows);