Implement evaluator for sparse-selfadjoint products

Gael Guennebaud 2014-07-22 09:32:40 +02:00
parent 9b729f93a1
commit 2a251ffab0
9 changed files with 352 additions and 145 deletions
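For context, a minimal usage sketch (not part of the commit) of the product expressions this change adds evaluators for: sparse-selfadjoint times dense, dense times sparse-selfadjoint, and sparse-selfadjoint times sparse. It assumes the evaluator branch (EIGEN_TEST_EVALUATORS) is enabled; the non-evaluator overloads further down behave the same from the user's point of view.

#include <Eigen/SparseCore>
using namespace Eigen;

int main()
{
  // Only one triangle of the selfadjoint matrix needs to be stored.
  SparseMatrix<double> A(3,3);
  A.insert(0,0) = 2.0;
  A.insert(1,0) = 1.0;   // strictly lower part
  A.insert(2,2) = 3.0;
  A.makeCompressed();

  VectorXd x = VectorXd::Random(3);

  // sparse selfadjoint * dense: handled by sparse_selfadjoint_time_dense_product
  VectorXd y = A.selfadjointView<Lower>() * x;

  // dense * sparse selfadjoint: evaluated by transposing both operands
  RowVectorXd z = x.transpose() * A.selfadjointView<Lower>();

  // sparse selfadjoint * sparse: the view is first evaluated into a temporary SparseMatrix
  SparseMatrix<double> B(3,3);
  B.setIdentity();
  SparseMatrix<double> C = A.selfadjointView<Lower>() * B;
  return 0;
}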

View File

@ -53,11 +53,12 @@ struct Sparse {};
#include "src/SparseCore/SparseSparseProductWithPruning.h"
#include "src/SparseCore/SparseProduct.h"
#include "src/SparseCore/SparseDenseProduct.h"
#include "src/SparseCore/SparseSelfAdjointView.h"
#ifndef EIGEN_TEST_EVALUATORS
#include "src/SparseCore/SparsePermutation.h"
#include "src/SparseCore/SparseFuzzy.h"
#include "src/SparseCore/SparseTriangularView.h"
#include "src/SparseCore/SparseSelfAdjointView.h"
#include "src/SparseCore/TriangularSolver.h"
#endif

View File

@ -62,14 +62,14 @@ template<typename Lhs, typename Rhs> struct product_type
typedef typename remove_all<Lhs>::type _Lhs;
typedef typename remove_all<Rhs>::type _Rhs;
enum {
MaxRows = _Lhs::MaxRowsAtCompileTime,
Rows = _Lhs::RowsAtCompileTime,
MaxCols = _Rhs::MaxColsAtCompileTime,
Cols = _Rhs::ColsAtCompileTime,
MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime,
_Rhs::MaxRowsAtCompileTime),
Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime,
_Rhs::RowsAtCompileTime)
MaxRows = traits<_Lhs>::MaxRowsAtCompileTime,
Rows = traits<_Lhs>::RowsAtCompileTime,
MaxCols = traits<_Rhs>::MaxColsAtCompileTime,
Cols = traits<_Rhs>::ColsAtCompileTime,
MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::MaxColsAtCompileTime,
traits<_Rhs>::MaxRowsAtCompileTime),
Depth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::ColsAtCompileTime,
traits<_Rhs>::RowsAtCompileTime)
};
// the splitting into different lines of code here, introducing the _select enums and the typedef below,
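The switch from _Lhs::RowsAtCompileTime to traits<_Lhs>::RowsAtCompileTime is needed because some product operands, such as SparseSelfAdjointView, derive only from EigenBase and do not re-export the size enums themselves, whereas their traits<> specialization inherits them from the wrapped matrix (see traits<SparseSelfAdjointView<MatrixType,Mode> > further down). A stand-alone illustration of that pattern, using hypothetical types that are not part of Eigen:

template<typename T> struct traits;              // primary template, specialized per type

struct Mat4
{
  enum { RowsAtCompileTime = 4 };
};
template<> struct traits<Mat4>
{
  enum { RowsAtCompileTime = Mat4::RowsAtCompileTime };
};

// A thin view wrapping Mat4 that does not define RowsAtCompileTime itself...
struct ViewOfMat4 {};
// ...but whose traits simply inherit the wrapped type's traits.
template<> struct traits<ViewOfMat4> : traits<Mat4> {};

int main()
{
  // ViewOfMat4::RowsAtCompileTime would not compile, but reading the size
  // through traits<> works uniformly for both the matrix and the view.
  return traits<ViewOfMat4>::RowsAtCompileTime == 4 ? 0 : 1;
}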

View File

@ -58,24 +58,26 @@ struct traits<Product<Lhs, Rhs, Option> >
{
typedef typename remove_all<Lhs>::type LhsCleaned;
typedef typename remove_all<Rhs>::type RhsCleaned;
typedef traits<LhsCleaned> LhsTraits;
typedef traits<RhsCleaned> RhsTraits;
typedef MatrixXpr XprKind;
typedef typename product_result_scalar<LhsCleaned,RhsCleaned>::Scalar Scalar;
typedef typename product_promote_storage_type<typename traits<LhsCleaned>::StorageKind,
typename traits<RhsCleaned>::StorageKind,
typedef typename product_promote_storage_type<typename LhsTraits::StorageKind,
typename RhsTraits::StorageKind,
internal::product_type<Lhs,Rhs>::ret>::ret StorageKind;
typedef typename promote_index_type<typename traits<LhsCleaned>::Index,
typename traits<RhsCleaned>::Index>::type Index;
typedef typename promote_index_type<typename LhsTraits::Index,
typename RhsTraits::Index>::type Index;
enum {
RowsAtCompileTime = LhsCleaned::RowsAtCompileTime,
ColsAtCompileTime = RhsCleaned::ColsAtCompileTime,
MaxRowsAtCompileTime = LhsCleaned::MaxRowsAtCompileTime,
MaxColsAtCompileTime = RhsCleaned::MaxColsAtCompileTime,
RowsAtCompileTime = LhsTraits::RowsAtCompileTime,
ColsAtCompileTime = RhsTraits::ColsAtCompileTime,
MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime,
MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime,
// FIXME: only needed by GeneralMatrixMatrixTriangular
InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsCleaned::ColsAtCompileTime, RhsCleaned::RowsAtCompileTime),
InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),
#ifndef EIGEN_TEST_EVALUATORS
// dummy, for evaluators unit test only
@ -84,8 +86,8 @@ struct traits<Product<Lhs, Rhs, Option> >
// The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator.
Flags = ( MaxRowsAtCompileTime==1
|| ((LhsCleaned::Flags&NoPreferredStorageOrderBit) && (RhsCleaned::Flags&RowMajorBit))
|| ((RhsCleaned::Flags&NoPreferredStorageOrderBit) && (LhsCleaned::Flags&RowMajorBit)) )
|| ((LhsTraits::Flags&NoPreferredStorageOrderBit) && (RhsTraits::Flags&RowMajorBit))
|| ((RhsTraits::Flags&NoPreferredStorageOrderBit) && (LhsTraits::Flags&RowMajorBit)) )
? RowMajorBit : (MaxColsAtCompileTime==1 ? 0 : NoPreferredStorageOrderBit)
};
};

View File

@ -328,6 +328,7 @@ struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, Dyn
#endif // EIGEN_TEST_EVALUATORS
#ifdef EIGEN_ENABLE_EVALUATORS
// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)

View File

@ -127,6 +127,7 @@ template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
// TODO use the evaluator mechanism
other.derived().evalTo(derived());
return derived();
}
@ -135,6 +136,7 @@ template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
// TODO use the evaluator mechanism
other.evalTo(derived());
return derived();
}
@ -143,6 +145,7 @@ template<typename Derived>
template<typename OtherDerived>
inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
// FIXME: by default, sparse evaluation does not alias, so we should be able to bypass the generic call_assignment
internal::call_assignment/*_no_alias*/(derived(), other.derived());
return derived();
}
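A short sketch (not part of the commit) of assignments that land in the overloads above, assuming a square sparse matrix A with only its lower triangle stored:

SparseMatrix<double> A(3,3), B(3,3), S;
A.insert(0,0) = 1.0;
A.insert(1,0) = 2.0;           // lower triangle only
B.setIdentity();

// SparseSelfAdjointView derives only from EigenBase, so this goes through the
// EigenBase overload above and its evalTo(), expanding both triangles into S.
S = A.selfadjointView<Lower>();

// A plain sparse expression goes through the SparseMatrixBase overload, i.e.
// internal::call_assignment in the evaluator branch.
S = B.transpose();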

View File

@ -448,45 +448,6 @@ protected:
PlainObject m_result;
};
// template<typename Lhs, typename Rhs, bool Transpose, typename LhsIterator>
// class sparse_dense_outer_product_iterator : public LhsIterator
// {
// typedef typename SparseDenseOuterProduct::Index Index;
// public:
// template<typename XprEval>
// EIGEN_STRONG_INLINE InnerIterator(const XprEval& prod, Index outer)
// : LhsIterator(prod.lhs(), 0),
// m_outer(outer), m_empty(false), m_factor(get(prod.rhs(), outer, typename internal::traits<Rhs>::StorageKind() ))
// {}
//
// inline Index outer() const { return m_outer; }
// inline Index row() const { return Transpose ? m_outer : Base::index(); }
// inline Index col() const { return Transpose ? Base::index() : m_outer; }
//
// inline Scalar value() const { return Base::value() * m_factor; }
// inline operator bool() const { return Base::operator bool() && !m_empty; }
//
// protected:
// Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense()) const
// {
// return rhs.coeff(outer);
// }
//
// Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse())
// {
// typename Traits::_RhsNested::InnerIterator it(rhs, outer);
// if (it && it.index()==0 && it.value()!=Scalar(0))
// return it.value();
// m_empty = true;
// return Scalar(0);
// }
//
// Index m_outer;
// bool m_empty;
// Scalar m_factor;
// };
template<typename LhsT, typename RhsT, bool Transpose>
struct sparse_dense_outer_product_evaluator
{

View File

@ -664,7 +664,11 @@ class SparseMatrix
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
#ifndef EIGEN_TEST_EVALUATORS
*this = other;
#else
Base::operator=(other);
#endif
}
/** Copy constructor (it performs a deep copy) */

View File

@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@ -12,13 +12,23 @@
namespace Eigen {
#ifndef EIGEN_TEST_EVALUATORS
template<typename Lhs, typename Rhs, int Mode>
class SparseSelfAdjointTimeDenseProduct;
template<typename Lhs, typename Rhs, int Mode>
class DenseTimeSparseSelfAdjointProduct;
#endif // #ifndef EIGEN_TEST_EVALUATORS
/** \ingroup SparseCore_Module
* \class SparseSelfAdjointView
*
* \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
*
* \param MatrixType the type of the sparse matrix storing the coefficients
* \param UpLo can be either \c #Lower or \c #Upper
* \param Mode can be either \c #Lower or \c #Upper
*
* This class is an expression of a selfadjoint matrix from a triangular part of a matrix
* with sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
@ -26,37 +36,33 @@ namespace Eigen {
*
* \sa SparseMatrixBase::selfadjointView()
*/
template<typename Lhs, typename Rhs, int UpLo>
class SparseSelfAdjointTimeDenseProduct;
template<typename Lhs, typename Rhs, int UpLo>
class DenseTimeSparseSelfAdjointProduct;
namespace internal {
template<typename MatrixType, unsigned int UpLo>
struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};
template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
template<int UpLo,typename MatrixType,int DestOrder>
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
}
template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
: public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
: public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
public:
enum { Mode = _Mode };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef Matrix<Index,Dynamic,1> VectorI;
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
{
eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
@ -74,40 +80,76 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
* Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
* Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
#ifndef EIGEN_TEST_EVALUATORS
template<typename OtherDerived>
SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>
operator*(const SparseMatrixBase<OtherDerived>& rhs) const
{
return SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>(*this, rhs.derived());
}
#else
template<typename OtherDerived>
Product<SparseSelfAdjointView, OtherDerived>
operator*(const SparseMatrixBase<OtherDerived>& rhs) const
{
return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
}
#endif
/** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
*
* Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
* Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
#ifndef EIGEN_TEST_EVALUATORS
template<typename OtherDerived> friend
SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject >
operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
return SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject>(lhs.derived(), rhs);
}
#else // EIGEN_TEST_EVALUATORS
template<typename OtherDerived> friend
Product<OtherDerived, SparseSelfAdjointView>
operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
}
#endif // EIGEN_TEST_EVALUATORS
/** Efficient sparse self-adjoint matrix times dense vector/matrix product */
#ifndef EIGEN_TEST_EVALUATORS
template<typename OtherDerived>
SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,Mode>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,Mode>(m_matrix, rhs.derived());
}
#else
template<typename OtherDerived>
Product<SparseSelfAdjointView,OtherDerived>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
}
#endif
/** Efficient dense vector/matrix times sparse self-adjoint matrix product */
#ifndef EIGEN_TEST_EVALUATORS
template<typename OtherDerived> friend
DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,Mode>
operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,Mode>(lhs.derived(), rhs.m_matrix);
}
#else
template<typename OtherDerived> friend
Product<OtherDerived,SparseSelfAdjointView>
operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
}
#endif
/** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
* \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
@ -123,30 +165,31 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
/** \internal triggered by sparse_matrix = SparseSelfAdjointView; */
template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
{
internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
internal::permute_symm_to_fullsymm<Mode>(m_matrix, _dest);
}
template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
{
// TODO directly evaluate into _dest;
SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
internal::permute_symm_to_fullsymm<Mode>(m_matrix, tmp);
_dest = tmp;
}
/** \returns an expression of P H P^-1 */
SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
#ifndef EIGEN_TEST_EVALUATORS
SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
{
return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
}
template<typename SrcMatrixType,int SrcUpLo>
SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
template<typename SrcMatrixType,int SrcMode>
SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
{
permutedMatrix.evalTo(*this);
return *this;
}
#endif // EIGEN_TEST_EVALUATORS
SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
{
@ -154,22 +197,18 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
return *this = src.twistedBy(pnull);
}
template<typename SrcMatrixType,unsigned int SrcUpLo>
SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
template<typename SrcMatrixType,unsigned int SrcMode>
SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
{
PermutationMatrix<Dynamic> pnull;
return *this = src.twistedBy(pnull);
}
// const SparseLLT<PlainObject, UpLo> llt() const;
// const SparseLDLT<PlainObject, UpLo> ldlt() const;
protected:
typename MatrixType::Nested m_matrix;
mutable VectorI m_countPerRow;
mutable VectorI m_countPerCol;
//mutable VectorI m_countPerRow;
//mutable VectorI m_countPerCol;
};
/***************************************************************************
@ -177,15 +216,15 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
***************************************************************************/
template<typename Derived>
template<unsigned int UpLo>
const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
template<unsigned int Mode>
const SparseSelfAdjointView<Derived, Mode> SparseMatrixBase<Derived>::selfadjointView() const
{
return derived();
}
template<typename Derived>
template<unsigned int UpLo>
SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
template<unsigned int Mode>
SparseSelfAdjointView<Derived, Mode> SparseMatrixBase<Derived>::selfadjointView()
{
return derived();
}
@ -194,16 +233,16 @@ SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView(
* Implementation of SparseSelfAdjointView methods
***************************************************************************/
template<typename MatrixType, unsigned int UpLo>
template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,UpLo>&
SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
if(alpha==Scalar(0))
m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
m_matrix.const_cast_derived() = tmp.template triangularView<Mode>();
else
m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
m_matrix.const_cast_derived() += alpha * tmp.template triangularView<Mode>();
return *this;
}
@ -212,18 +251,19 @@ SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<Derive
* Implementation of sparse self-adjoint time dense matrix
***************************************************************************/
#ifndef EIGEN_TEST_EVALUATORS
namespace internal {
template<typename Lhs, typename Rhs, int UpLo>
struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
: traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
template<typename Lhs, typename Rhs, int Mode>
struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,Mode> >
: traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,Mode>, Lhs, Rhs> >
{
typedef Dense StorageKind;
};
}
template<typename Lhs, typename Rhs, int UpLo>
template<typename Lhs, typename Rhs, int Mode>
class SparseSelfAdjointTimeDenseProduct
: public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
: public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,Mode>, Lhs, Rhs>
{
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
@ -241,9 +281,9 @@ class SparseSelfAdjointTimeDenseProduct
enum {
LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
ProcessFirstHalf =
((UpLo&(Upper|Lower))==(Upper|Lower))
|| ( (UpLo&Upper) && !LhsIsRowMajor)
|| ( (UpLo&Lower) && LhsIsRowMajor),
((Mode&(Upper|Lower))==(Upper|Lower))
|| ( (Mode&Upper) && !LhsIsRowMajor)
|| ( (Mode&Lower) && LhsIsRowMajor),
ProcessSecondHalf = !ProcessFirstHalf
};
for (typename _Lhs::Index j=0; j<m_lhs.outerSize(); ++j)
@ -276,15 +316,15 @@ class SparseSelfAdjointTimeDenseProduct
};
namespace internal {
template<typename Lhs, typename Rhs, int UpLo>
struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
: traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
template<typename Lhs, typename Rhs, int Mode>
struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,Mode> >
: traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,Mode>, Lhs, Rhs> >
{};
}
template<typename Lhs, typename Rhs, int UpLo>
template<typename Lhs, typename Rhs, int Mode>
class DenseTimeSparseSelfAdjointProduct
: public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
: public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,Mode>, Lhs, Rhs>
{
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
@ -301,16 +341,197 @@ class DenseTimeSparseSelfAdjointProduct
DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
};
#else // EIGEN_TEST_EVALUATORS
namespace internal {
template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
EIGEN_ONLY_USED_FOR_DEBUG(alpha);
// TODO use alpha
eigen_assert(alpha==AlphaType(1) && "alpha != 1 is not implemented yet, sorry");
typedef typename evaluator<SparseLhsType>::type LhsEval;
typedef typename evaluator<SparseLhsType>::InnerIterator LhsIterator;
typedef typename SparseLhsType::Index Index;
typedef typename SparseLhsType::Scalar LhsScalar;
enum {
LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
ProcessFirstHalf =
((Mode&(Upper|Lower))==(Upper|Lower))
|| ( (Mode&Upper) && !LhsIsRowMajor)
|| ( (Mode&Lower) && LhsIsRowMajor),
ProcessSecondHalf = !ProcessFirstHalf
};
LhsEval lhsEval(lhs);
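// Scatter each stored coefficient of the selected triangle to both symmetric
// positions: for an off-diagonal entry a_ij, res.row(i) += a_ij * rhs.row(j)
// and res.row(j) += conj(a_ij) * rhs.row(i); diagonal entries contribute once.
// This is what makes storing a single triangle sufficient.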
for (Index j=0; j<lhs.outerSize(); ++j)
{
LhsIterator i(lhsEval,j);
if (ProcessSecondHalf)
{
while (i && i.index()<j) ++i;
if(i && i.index()==j)
{
res.row(j) += i.value() * rhs.row(j);
++i;
}
}
for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
{
Index a = LhsIsRowMajor ? j : i.index();
Index b = LhsIsRowMajor ? i.index() : j;
LhsScalar v = i.value();
res.row(a) += (v) * rhs.row(b);
res.row(b) += numext::conj(v) * rhs.row(a);
}
if (ProcessFirstHalf && i && (i.index()==j))
res.row(j) += i.value() * rhs.row(j);
}
}
struct SparseSelfAdjointShape { static std::string debugName() { return "SparseSelfAdjointShape"; } };
// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
typedef SparseSelfAdjointShape Shape;
static const int AssumeAliasing = 0;
};
template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
{
template<typename Dest>
static void evalTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs)
{
typedef typename LhsView::_MatrixTypeNested Lhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhsView.matrix());
RhsNested rhsNested(rhs);
dst.setZero();
internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, typename Dest::Scalar(1));
}
};
template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
{
template<typename Dest>
static void evalTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView)
{
typedef typename RhsView::_MatrixTypeNested Rhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhsView.matrix());
dst.setZero();
// transpose everything: evaluate dst^T = rhs^T * lhs^T so that the selfadjoint-times-dense kernel above can be reused
Transpose<Dest> dstT(dst);
internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, typename Dest::Scalar(1));
}
};
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, DenseShape, typename Lhs::Scalar, typename Rhs::Scalar>
: public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>::type
{
typedef Product<Lhs, Rhs, DefaultProduct> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef typename evaluator<PlainObject>::type Base;
product_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, Rhs, SparseSelfAdjointShape, DenseShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
}
protected:
PlainObject m_result;
};
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, DenseShape, SparseSelfAdjointShape, typename Lhs::Scalar, typename Rhs::Scalar>
: public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>::type
{
typedef Product<Lhs, Rhs, DefaultProduct> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef typename evaluator<PlainObject>::type Base;
product_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, Rhs, DenseShape, SparseSelfAdjointShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
}
protected:
PlainObject m_result;
};
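// Sparse-selfadjoint * sparse product: the selfadjoint lhs view is first
// evaluated into a temporary full SparseMatrix (m_lhs below), and a regular
// sparse * sparse product evaluator is then run on that temporary.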
template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape, typename LhsView::Scalar, typename Rhs::Scalar>
: public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>::type
{
typedef Product<LhsView, Rhs, DefaultProduct> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef typename evaluator<PlainObject>::type Base;
product_evaluator(const XprType& xpr)
: /*m_lhs(xpr.lhs()),*/ m_result(xpr.rows(), xpr.cols())
{
m_lhs = xpr.lhs();
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
}
protected:
typename Rhs::PlainObject m_lhs;
PlainObject m_result;
};
template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape, typename Lhs::Scalar, typename RhsView::Scalar>
: public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>::type
{
typedef Product<Lhs, RhsView, DefaultProduct> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef typename evaluator<PlainObject>::type Base;
product_evaluator(const XprType& xpr)
: m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
}
protected:
typename Lhs::PlainObject m_rhs;
PlainObject m_result;
};
} // namespace internal
#endif // EIGEN_TEST_EVALUATORS
/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {
template<typename MatrixType, int UpLo>
struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
};
template<int UpLo,typename MatrixType,int DestOrder>
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
{
typedef typename MatrixType::Index Index;
@ -337,11 +558,11 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
Index r = it.row();
Index c = it.col();
Index ip = perm ? perm[i] : i;
if(UpLo==(Upper|Lower))
if(Mode==(Upper|Lower))
count[StorageOrderMatch ? jp : ip]++;
else if(r==c)
count[ip]++;
else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
{
count[ip]++;
count[jp]++;
@ -370,7 +591,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
Index jp = perm ? perm[j] : j;
Index ip = perm ? perm[i] : i;
if(UpLo==(Upper|Lower))
if(Mode==(Upper|Lower))
{
Index k = count[StorageOrderMatch ? jp : ip]++;
dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
@ -382,7 +603,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
dest.innerIndexPtr()[k] = ip;
dest.valuePtr()[k] = it.value();
}
else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
{
if(!StorageOrderMatch)
std::swap(ip,jp);
@ -397,7 +618,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
}
}
template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
{
typedef typename MatrixType::Index Index;
@ -407,8 +628,8 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
enum {
SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
StorageOrderMatch = int(SrcOrder) == int(DstOrder),
DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
};
Index size = mat.rows();
@ -421,11 +642,11 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
{
Index i = it.index();
if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
Index ip = perm ? perm[i] : i;
count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
}
dest.outerIndexPtr()[0] = 0;
@ -441,17 +662,17 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
{
Index i = it.index();
if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
Index jp = perm ? perm[j] : j;
Index ip = perm? perm[i] : i;
Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
if(!StorageOrderMatch) std::swap(ip,jp);
if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
dest.valuePtr()[k] = numext::conj(it.value());
else
dest.valuePtr()[k] = it.value();
@ -461,9 +682,19 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
}
template<typename MatrixType,int UpLo>
#ifndef EIGEN_TEST_EVALUATORS
namespace internal {
template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};
}
template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
: public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
: public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
public:
typedef typename MatrixType::Scalar Scalar;
@ -485,15 +716,15 @@ class SparseSymmetricPermutationProduct
template<typename DestScalar, int Options, typename DstIndex>
void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
{
// internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
// internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
SparseMatrix<DestScalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data());
internal::permute_symm_to_fullsymm<Mode>(m_matrix,tmp,m_perm.indices().data());
_dest = tmp;
}
template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
template<typename DestType,unsigned int DestMode> void evalTo(SparseSelfAdjointView<DestType,DestMode>& dest) const
{
internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
internal::permute_symm_to_symm<Mode,DestMode>(m_matrix,dest.matrix(),m_perm.indices().data());
}
protected:
@ -502,6 +733,10 @@ class SparseSymmetricPermutationProduct
};
#else // EIGEN_TEST_EVALUATORS
#endif // EIGEN_TEST_EVALUATORS
} // end namespace Eigen
#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
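For reference, a minimal sketch (not part of the commit) of the symmetric permutation API backed by permute_symm_to_fullsymm and permute_symm_to_symm above. Since twistedBy and SparseSymmetricPermutationProduct are still guarded by #ifndef EIGEN_TEST_EVALUATORS at this point, this exercises the non-evaluator path:

#include <Eigen/SparseCore>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;
  SpMat A(4,4);
  A.insert(0,0) = 4.0; A.insert(1,0) = 1.0;   // lower triangle only
  A.insert(2,1) = 2.0; A.insert(3,3) = 5.0;

  PermutationMatrix<Dynamic,Dynamic,SpMat::Index> P(4);
  P.setIdentity();                            // any permutation works; identity keeps it simple

  // P A P^-1, expanded into a full sparse matrix (both triangles stored):
  SpMat full;
  full = A.selfadjointView<Lower>().twistedBy(P);     // uses permute_symm_to_fullsymm

  // The same twisted matrix, keeping only the upper triangle of the result:
  SpMat upper(4,4);
  upper.selfadjointView<Upper>() = A.selfadjointView<Lower>().twistedBy(P);   // uses permute_symm_to_symm
  return 0;
}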

View File

@ -19,7 +19,7 @@ template<typename SparseMatrixType> void sparse_product()
typedef typename SparseMatrixType::Scalar Scalar;
enum { Flags = SparseMatrixType::Flags };
double density = (std::max)(8./(rows*cols), 0.1);
double density = (std::max)(8./(rows*cols), 0.2);
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
typedef Matrix<Scalar,1,Dynamic> RowDenseVector;
@ -109,7 +109,7 @@ template<typename SparseMatrixType> void sparse_product()
Index c1 = internal::random<Index>(0,cols-1);
Index r1 = internal::random<Index>(0,depth-1);
DenseMatrix dm5 = DenseMatrix::Random(depth, cols);
VERIFY_IS_APPROX( m4=m2.col(c)*dm5.col(c1).transpose(), refMat4=refMat2.col(c)*dm5.col(c1).transpose());
VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
VERIFY_IS_APPROX( m4=m2.middleCols(c,1)*dm5.col(c1).transpose(), refMat4=refMat2.col(c)*dm5.col(c1).transpose());
@ -153,11 +153,11 @@ template<typename SparseMatrixType> void sparse_product()
RowSpVector rv0(depth), rv1;
RowDenseVector drv0(depth), drv1(rv1);
initSparse(2*density,drv0, rv0);
VERIFY_IS_APPROX(cv1=rv0*m3, dcv1=drv0*refMat3);
VERIFY_IS_APPROX(cv1=m3*cv0, dcv1=refMat3*dcv0);
VERIFY_IS_APPROX(rv1=rv0*m3, drv1=drv0*refMat3);
VERIFY_IS_APPROX(cv1=m3*cv0, dcv1=refMat3*dcv0);
VERIFY_IS_APPROX(cv1=m3t.adjoint()*cv0, dcv1=refMat3t.adjoint()*dcv0);
VERIFY_IS_APPROX(cv1=rv0*m3, dcv1=drv0*refMat3);
VERIFY_IS_APPROX(rv1=m3*cv0, drv1=refMat3*dcv0);
}