mirror of https://gitlab.com/libeigen/eigen.git
synced 2025-04-12 19:20:36 +08:00
bug #482: pass scalar arguments by const references. Still remains a few cases that might affect the ABI (see the bug entry)
This commit is contained in:
parent cc35c44256
commit 12a1313b09
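Every hunk in the diff below applies the same pattern: a scalar parameter that used to be taken by value (`Scalar alpha`) is now taken by const reference (`const Scalar& alpha`). The sketch below is not part of the patch; `HeavyScalar`, `axpy_like_by_value`, and `axpy_like_by_cref` are illustrative names only. Assuming the motivation of bug #482 is support for expensive custom scalar types (multiprecision numbers, for instance), it shows why passing by const reference avoids copies that are costly for such types while being essentially free for built-in float/double, and why changing an exported signature this way can affect the ABI, as the commit message warns.

#include <iostream>

// Illustrative stand-in for an expensive scalar type (e.g. a multiprecision
// number); copying it is costly, so we count copies to make the point.
struct HeavyScalar {
  double value;
  static int copies;
  HeavyScalar(double v) : value(v) {}
  HeavyScalar(const HeavyScalar& other) : value(other.value) { ++copies; }
};
int HeavyScalar::copies = 0;

// Old style: the scalar is passed by value, so every call copies it.
template <typename Scalar>
void axpy_like_by_value(Scalar alpha, const Scalar& x, Scalar& y) {
  y.value += alpha.value * x.value;
}

// New style (what this commit does): pass by const reference, no copy.
// Because a reference and a value are passed differently at the binary
// level, switching an already-exported function to this form changes its ABI.
template <typename Scalar>
void axpy_like_by_cref(const Scalar& alpha, const Scalar& x, Scalar& y) {
  y.value += alpha.value * x.value;
}

int main() {
  HeavyScalar alpha(2.0), x(3.0), y(1.0);

  axpy_like_by_value(alpha, x, y);
  std::cout << "copies after by-value call: " << HeavyScalar::copies << "\n";    // prints 1

  axpy_like_by_cref(alpha, x, y);
  std::cout << "copies after by-const-ref call: " << HeavyScalar::copies << "\n"; // still 1
  return 0;
}
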
@@ -196,7 +196,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
 LDLT& compute(const MatrixType& matrix);

 template <typename Derived>
-LDLT& rankUpdate(const MatrixBase<Derived>& w,RealScalar alpha=1);
+LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1);

 /** \returns the internal LDLT decomposition matrix
 *
@@ -347,7 +347,7 @@ template<> struct ldlt_inplace<Lower>
 // Here only rank-1 updates are implemented, to reduce the
 // requirement for intermediate storage and improve accuracy
 template<typename MatrixType, typename WDerived>
-static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, typename MatrixType::RealScalar sigma=1)
+static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, const typename MatrixType::RealScalar& sigma=1)
 {
 using internal::isfinite;
 typedef typename MatrixType::Scalar Scalar;
@@ -386,7 +386,7 @@ template<> struct ldlt_inplace<Lower>
 }

 template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
-static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, typename MatrixType::RealScalar sigma=1)
+static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1)
 {
 // Apply the permutation to the input w
 tmp = transpositions * w;
@@ -405,7 +405,7 @@ template<> struct ldlt_inplace<Upper>
 }

 template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
-static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, typename MatrixType::RealScalar sigma=1)
+static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1)
 {
 Transpose<MatrixType> matt(mat);
 return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma);
@@ -457,7 +457,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
 */
 template<typename MatrixType, int _UpLo>
 template<typename Derived>
-LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w,typename NumTraits<typename MatrixType::Scalar>::Real sigma)
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
 {
 const Index size = w.rows();
 if (m_isInitialized)

@@ -551,7 +551,7 @@ struct linspaced_op_impl<Scalar,false>
 {
 typedef typename packet_traits<Scalar>::type Packet;

-linspaced_op_impl(Scalar low, Scalar step) :
+linspaced_op_impl(const Scalar& low, const Scalar& step) :
 m_low(low), m_step(step),
 m_packetStep(pset1<Packet>(packet_traits<Scalar>::size*step)),
 m_base(padd(pset1<Packet>(low), pmul(pset1<Packet>(step),plset<Scalar>(-packet_traits<Scalar>::size)))) {}
@@ -580,7 +580,7 @@ struct linspaced_op_impl<Scalar,true>
 {
 typedef typename packet_traits<Scalar>::type Packet;

-linspaced_op_impl(Scalar low, Scalar step) :
+linspaced_op_impl(const Scalar& low, const Scalar& step) :
 m_low(low), m_step(step),
 m_lowPacket(pset1<Packet>(m_low)), m_stepPacket(pset1<Packet>(m_step)), m_interPacket(plset<Scalar>(0)) {}

@@ -609,7 +609,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
 template <typename Scalar, bool RandomAccess> struct linspaced_op
 {
 typedef typename packet_traits<Scalar>::type Packet;
-linspaced_op(Scalar low, Scalar high, int num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
+linspaced_op(const Scalar& low, const Scalar& high, int num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}

 template<typename Index>
 EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }

@@ -270,7 +270,7 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
 internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, sub());
 }

-template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
 {
 internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, adds(alpha));
 }
@@ -346,7 +346,7 @@ class GeneralProduct<Lhs, Rhs, GemvProduct>
 enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight };
 typedef typename internal::conditional<int(Side)==OnTheRight,_LhsNested,_RhsNested>::type MatrixType;

-template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
 {
 eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols());
 internal::gemv_selector<Side,(int(MatrixType::Flags)&RowMajorBit) ? RowMajor : ColMajor,
@@ -361,7 +361,7 @@ template<int StorageOrder, bool BlasCompatible>
 struct gemv_selector<OnTheLeft,StorageOrder,BlasCompatible>
 {
 template<typename ProductType, typename Dest>
-static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
 {
 Transpose<Dest> destT(dest);
 enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
@@ -410,7 +410,7 @@ struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
 template<> struct gemv_selector<OnTheRight,ColMajor,true>
 {
 template<typename ProductType, typename Dest>
-static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+static inline void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
 {
 typedef typename ProductType::Index Index;
 typedef typename ProductType::LhsScalar LhsScalar;
@@ -483,7 +483,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
 template<> struct gemv_selector<OnTheRight,RowMajor,true>
 {
 template<typename ProductType, typename Dest>
-static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
 {
 typedef typename ProductType::LhsScalar LhsScalar;
 typedef typename ProductType::RhsScalar RhsScalar;
@@ -534,7 +534,7 @@ template<> struct gemv_selector<OnTheRight,RowMajor,true>
 template<> struct gemv_selector<OnTheRight,ColMajor,false>
 {
 template<typename ProductType, typename Dest>
-static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
 {
 typedef typename Dest::Index Index;
 // TODO makes sure dest is sequentially stored in memory, otherwise use a temp
@@ -547,7 +547,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,false>
 template<> struct gemv_selector<OnTheRight,RowMajor,false>
 {
 template<typename ProductType, typename Dest>
-static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
 {
 typedef typename Dest::Index Index;
 // TODO makes sure rhs is sequentially stored in memory, otherwise use a temp

@@ -108,7 +108,7 @@ class ProductBase : public MatrixBase<Derived>
 inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); }

 template<typename Dest>
-inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { derived().scaleAndAddTo(dst,alpha); }
+inline void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { derived().scaleAndAddTo(dst,alpha); }

 const _LhsNested& lhs() const { return m_lhs; }
 const _RhsNested& rhs() const { return m_rhs; }
@@ -241,7 +241,7 @@ class ScaledProduct
 typedef typename Base::PlainObject PlainObject;
 // EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct)

-ScaledProduct(const NestedProduct& prod, Scalar x)
+ScaledProduct(const NestedProduct& prod, const Scalar& x)
 : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {}

 template<typename Dest>
@@ -254,7 +254,7 @@ class ScaledProduct
 inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); }

 template<typename Dest>
-inline void scaleAndAddTo(Dest& dst,Scalar a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); }
+inline void scaleAndAddTo(Dest& dst, const Scalar& a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); }

 const Scalar& alpha() const { return m_alpha; }

@@ -204,7 +204,7 @@ struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
 struct gemm_functor
 {
-gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
+gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha,
 BlockingType& blocking)
 : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
 {}
@@ -395,7 +395,7 @@ class GeneralProduct<Lhs, Rhs, GemmProduct>
 EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
 }

-template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
 {
 eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

@@ -42,7 +42,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
 {
 typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
 static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,
-const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha)
+const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, const ResScalar& alpha)
 {
 general_matrix_matrix_triangular_product<Index,
 RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
@@ -58,7 +58,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
 {
 typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
 static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride,
-const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha)
+const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, const ResScalar& alpha)
 {
 const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
 const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
@@ -136,7 +136,7 @@ struct tribb_kernel
 enum {
 BlockSize = EIGEN_PLAIN_ENUM_MAX(mr,nr)
 };
-void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, ResScalar alpha, RhsScalar* workspace)
+void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha, RhsScalar* workspace)
 {
 gebp_kernel<LhsScalar, RhsScalar, Index, mr, nr, ConjLhs, ConjRhs> gebp_kernel;
 Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer;

@@ -211,7 +211,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,Co
 const Scalar* lhs, Index lhsStride,
 const Scalar* rhs, Index rhsStride,
 Scalar* res, Index resStride,
-Scalar alpha)
+const Scalar& alpha)
 {
 product_selfadjoint_matrix<Scalar, Index,
 EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
@@ -234,7 +234,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
 const Scalar* _lhs, Index lhsStride,
 const Scalar* _rhs, Index rhsStride,
 Scalar* res, Index resStride,
-Scalar alpha)
+const Scalar& alpha)
 {
 Index size = rows;

@@ -315,7 +315,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLh
 const Scalar* _lhs, Index lhsStride,
 const Scalar* _rhs, Index rhsStride,
 Scalar* res, Index resStride,
-Scalar alpha)
+const Scalar& alpha)
 {
 Index size = cols;

@@ -383,7 +383,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>
 RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint
 };

-template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
 {
 eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

@@ -180,7 +180,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>

 SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

-template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
 {
 typedef typename Dest::Scalar ResScalar;
 typedef typename Base::RhsScalar RhsScalar;
@@ -260,7 +260,7 @@ struct SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>

 SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

-template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
 {
 // let's simply transpose the product
 Transpose<Dest> destT(dest);

@@ -22,7 +22,7 @@ namespace Eigen {
 template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
 struct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>
 {
-static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, Scalar alpha)
+static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
 {
 internal::conj_if<ConjRhs> cj;
 typedef Map<const Matrix<Scalar,Dynamic,1> > OtherMap;
@@ -38,7 +38,7 @@ struct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>
 template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
 struct selfadjoint_rank1_update<Scalar,Index,RowMajor,UpLo,ConjLhs,ConjRhs>
 {
-static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, Scalar alpha)
+static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
 {
 selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,stride,vecY,vecX,alpha);
 }
@@ -50,7 +50,7 @@ struct selfadjoint_product_selector;
 template<typename MatrixType, typename OtherType, int UpLo>
 struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
 {
-static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha)
+static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
 {
 typedef typename MatrixType::Scalar Scalar;
 typedef typename MatrixType::Index Index;
@@ -83,7 +83,7 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
 template<typename MatrixType, typename OtherType, int UpLo>
 struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
 {
-static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha)
+static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
 {
 typedef typename MatrixType::Scalar Scalar;
 typedef typename MatrixType::Index Index;

@@ -24,7 +24,7 @@ struct selfadjoint_rank2_update_selector;
 template<typename Scalar, typename Index, typename UType, typename VType>
 struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
 {
-static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
+static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
 {
 const Index size = u.size();
 for (Index i=0; i<size; ++i)
@@ -39,7 +39,7 @@ struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
 template<typename Scalar, typename Index, typename UType, typename VType>
 struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>
 {
-static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
+static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
 {
 const Index size = u.size();
 for (Index i=0; i<size; ++i)

@@ -61,7 +61,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
 const Scalar* lhs, Index lhsStride,
 const Scalar* rhs, Index rhsStride,
 Scalar* res, Index resStride,
-Scalar alpha, level3_blocking<Scalar,Scalar>& blocking)
+const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
 {
 product_triangular_matrix_matrix<Scalar, Index,
 (Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),
@@ -96,7 +96,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
 const Scalar* _lhs, Index lhsStride,
 const Scalar* _rhs, Index rhsStride,
 Scalar* res, Index resStride,
-Scalar alpha, level3_blocking<Scalar,Scalar>& blocking)
+const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
 {
 // strip zeros
 Index diagSize = (std::min)(_rows,_depth);
@@ -225,7 +225,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
 const Scalar* _lhs, Index lhsStride,
 const Scalar* _rhs, Index rhsStride,
 Scalar* res, Index resStride,
-Scalar alpha, level3_blocking<Scalar,Scalar>& blocking)
+const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
 {
 // strip zeros
 Index diagSize = (std::min)(_cols,_depth);
@@ -364,7 +364,7 @@ struct TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>

 TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

-template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
 {
 typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
 typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);

@@ -27,7 +27,7 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
 HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
 };
 static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
-const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha)
+const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
 {
 static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
 Index size = (std::min)(_rows,_cols);
@@ -90,7 +90,7 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
 HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
 };
 static void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
-const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha)
+const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
 {
 static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
 Index diagSize = (std::min)(_rows,_cols);
@@ -171,7 +171,7 @@ struct TriangularProduct<Mode,true,Lhs,false,Rhs,true>

 TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

-template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
 {
 eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

@@ -187,7 +187,7 @@ struct TriangularProduct<Mode,false,Lhs,true,Rhs,false>

 TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

-template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
 {
 eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

@@ -205,7 +205,7 @@ namespace internal {
 template<> struct trmv_selector<ColMajor>
 {
 template<int Mode, typename Lhs, typename Rhs, typename Dest>
-static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar alpha)
+static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, const typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar& alpha)
 {
 typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
 typedef typename ProductType::Index Index;
@@ -281,7 +281,7 @@ template<> struct trmv_selector<ColMajor>
 template<> struct trmv_selector<RowMajor>
 {
 template<int Mode, typename Lhs, typename Rhs, typename Dest>
-static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar alpha)
+static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, const typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar& alpha)
 {
 typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
 typedef typename ProductType::LhsScalar LhsScalar;

@@ -242,7 +242,7 @@ template<typename _MatrixType> class ComplexEigenSolver
 EigenvectorType m_matX;

 private:
-void doComputeEigenvectors(RealScalar matrixnorm);
+void doComputeEigenvectors(const RealScalar& matrixnorm);
 void sortEigenvalues(bool computeEigenvectors);
 };

@@ -273,7 +273,7 @@ ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEi


 template<typename MatrixType>
-void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)
+void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(const RealScalar& matrixnorm)
 {
 const Index n = m_eivalues.size();

@@ -410,7 +410,7 @@ EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvect

 // Complex scalar division.
 template<typename Scalar>
-std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
+std::complex<Scalar> cdiv(const Scalar& xr, const Scalar& xi, const Scalar& yr, const Scalar& yi)
 {
 using std::abs;
 Scalar r,d;

@@ -234,8 +234,8 @@ template<typename _MatrixType> class RealSchur
 typedef Matrix<Scalar,3,1> Vector3s;

 Scalar computeNormOfT();
-Index findSmallSubdiagEntry(Index iu, Scalar norm);
-void splitOffTwoRows(Index iu, bool computeU, Scalar exshift);
+Index findSmallSubdiagEntry(Index iu, const Scalar& norm);
+void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
 void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
 void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
 void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
@@ -343,7 +343,7 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()

 /** \internal Look for single small sub-diagonal element and returns its index */
 template<typename MatrixType>
-inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, Scalar norm)
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& norm)
 {
 using std::abs;
 Index res = iu;
@@ -361,7 +361,7 @@ inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(I

 /** \internal Update T given that rows iu-1 and iu decouple from the rest. */
 template<typename MatrixType>
-inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scalar exshift)
+inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift)
 {
 using std::sqrt;
 using std::abs;

@@ -282,7 +282,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
 * determined by \a prec.
 *
 * \sa MatrixBase::isApprox() */
-bool isApprox(const AlignedBox& other, RealScalar prec = ScalarTraits::dummy_precision()) const
+bool isApprox(const AlignedBox& other, const RealScalar& prec = ScalarTraits::dummy_precision()) const
 { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }

 protected:

@@ -110,7 +110,7 @@ class IncompleteLUT : internal::noncopyable
 {}

 template<typename MatrixType>
-IncompleteLUT(const MatrixType& mat, RealScalar droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
+IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
 : m_droptol(droptol),m_fillfactor(fillfactor),
 m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
 {
@@ -154,7 +154,7 @@ class IncompleteLUT : internal::noncopyable
 return *this;
 }

-void setDroptol(RealScalar droptol);
+void setDroptol(const RealScalar& droptol);
 void setFillfactor(int fillfactor);

 template<typename Rhs, typename Dest>
@@ -203,7 +203,7 @@ protected:
 * \param droptol Drop any element whose magnitude is less than this tolerance
 **/
 template<typename Scalar>
-void IncompleteLUT<Scalar>::setDroptol(RealScalar droptol)
+void IncompleteLUT<Scalar>::setDroptol(const RealScalar& droptol)
 {
 this->m_droptol = droptol;
 }

@@ -120,7 +120,7 @@ public:
 RealScalar tolerance() const { return m_tolerance; }

 /** Sets the tolerance threshold used by the stopping criteria */
-Derived& setTolerance(RealScalar tolerance)
+Derived& setTolerance(const RealScalar& tolerance)
 {
 m_tolerance = tolerance;
 return derived();

@@ -63,7 +63,7 @@ template<typename Scalar> class JacobiRotation

 template<typename Derived>
 bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
-bool makeJacobi(RealScalar x, Scalar y, RealScalar z);
+bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z);

 void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);

@@ -80,7 +80,7 @@ template<typename Scalar> class JacobiRotation
 * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
 */
 template<typename Scalar>
-bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
+bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z)
 {
 using std::sqrt;
 using std::abs;

@@ -187,7 +187,7 @@ class SPQR
 /// Set the fill-reducing ordering method to be used
 void setSPQROrdering(int ord) { m_ordering = ord;}
 /// Set the tolerance tol to treat columns with 2-norm < =tol as zero
-void setPivotThreshold(RealScalar tol) { m_tolerance = tol; }
+void setPivotThreshold(const RealScalar& tol) { m_tolerance = tol; }

 /** \returns a pointer to the SPQR workspace */
 cholmod_common *cholmodCommon() const { return &m_cc; }

@@ -288,7 +288,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
 * In practice, all coefficients having a magnitude smaller than \a epsilon
 * are skipped.
 */
-Iterator(const AmbiVector& vec, RealScalar epsilon = 0)
+Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
 : m_vector(vec)
 {
 using std::abs;

@@ -134,7 +134,7 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
 return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
 }
 /** Set the threshold used for a diagonal entry to be an acceptable pivot. */
-void setPivotThreshold(RealScalar thresh)
+void setPivotThreshold(const RealScalar& thresh)
 {
 m_diagpivotthresh = thresh;
 }

@@ -38,7 +38,7 @@ class SparseLUImpl
 void relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end);
 Index snode_dfs(const Index jcol, const Index kcol,const MatrixType& mat, IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu);
 Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu);
-Index pivotL(const Index jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu);
+Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu);
 template <typename Traits>
 void dfs_kernel(const Index jj, IndexVector& perm_r,
 Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,

@@ -57,7 +57,7 @@ namespace internal {
 *
 */
 template <typename Scalar, typename Index>
-Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu)
+Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu)
 {

 Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol

@@ -216,7 +216,7 @@ int EIGEN_BLAS_FUNC(hpr2)(char *uplo, int *n, RealScalar *palpha, RealScalar *px
 */
 int EIGEN_BLAS_FUNC(her)(char *uplo, int *n, RealScalar *palpha, RealScalar *px, int *incx, RealScalar *pa, int *lda)
 {
-typedef void (*functype)(int, Scalar*, int, const Scalar*, const Scalar*, Scalar);
+typedef void (*functype)(int, Scalar*, int, const Scalar*, const Scalar*, const Scalar&);
 static functype func[2];

 static bool init = false;

@@ -130,7 +130,7 @@ int EIGEN_BLAS_FUNC(trsv)(char *uplo, char *opa, char *diag, int *n, RealScalar

 int EIGEN_BLAS_FUNC(trmv)(char *uplo, char *opa, char *diag, int *n, RealScalar *pa, int *lda, RealScalar *pb, int *incb)
 {
-typedef void (*functype)(int, int, const Scalar *, int, const Scalar *, int, Scalar *, int, Scalar);
+typedef void (*functype)(int, int, const Scalar *, int, const Scalar *, int, Scalar *, int, const Scalar&);
 static functype func[16];

 static bool init = false;

@@ -85,7 +85,7 @@ int EIGEN_BLAS_FUNC(syr)(char *uplo, int *n, RealScalar *palpha, RealScalar *px,

 // init = true;
 // }
-typedef void (*functype)(int, Scalar*, int, const Scalar*, const Scalar*, Scalar);
+typedef void (*functype)(int, Scalar*, int, const Scalar*, const Scalar*, const Scalar&);
 static functype func[2];

 static bool init = false;

@@ -152,7 +152,7 @@ int EIGEN_BLAS_FUNC(trsm)(char *side, char *uplo, char *opa, char *diag, int *m,
 int EIGEN_BLAS_FUNC(trmm)(char *side, char *uplo, char *opa, char *diag, int *m, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb)
 {
 // std::cerr << "in trmm " << *side << " " << *uplo << " " << *opa << " " << *diag << " " << *m << " " << *n << " " << *lda << " " << *ldb << " " << *palpha << "\n";
-typedef void (*functype)(DenseIndex, DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar, internal::level3_blocking<Scalar,Scalar>&);
+typedef void (*functype)(DenseIndex, DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, const Scalar&, internal::level3_blocking<Scalar,Scalar>&);
 static functype func[32];
 static bool init = false;
 if(!init)
@@ -306,7 +306,7 @@ int EIGEN_BLAS_FUNC(syrk)(char *uplo, char *op, int *n, int *k, RealScalar *palp
 {
 // std::cerr << "in syrk " << *uplo << " " << *op << " " << *n << " " << *k << " " << *palpha << " " << *lda << " " << *pbeta << " " << *ldc << "\n";
 #if !ISCOMPLEX
-typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar);
+typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, const Scalar&);
 static functype func[8];

 static bool init = false;
@@ -500,7 +500,7 @@ int EIGEN_BLAS_FUNC(hemm)(char *side, char *uplo, int *m, int *n, RealScalar *pa
 // c = alpha*conj(a')*a + beta*c for op = 'C'or'c'
 int EIGEN_BLAS_FUNC(herk)(char *uplo, char *op, int *n, int *k, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pbeta, RealScalar *pc, int *ldc)
 {
-typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, Scalar);
+typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, const Scalar&);
 static functype func[8];

 static bool init = false;