Mirror of https://gitlab.com/libeigen/eigen.git (synced 2024-12-21 07:19:46 +08:00)

Commit 2a98bd9c8e: Merged eigen/eigen into default
@@ -10,7 +10,7 @@
 #ifndef EIGEN_CHOLMODSUPPORT_H
 #define EIGEN_CHOLMODSUPPORT_H
 
 namespace Eigen {
 
 namespace internal {
 
@@ -79,12 +79,12 @@ cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> >
 
   res.dtype = 0;
   res.stype = -1;
 
   if (internal::is_same<_StorageIndex,int>::value)
   {
     res.itype = CHOLMOD_INT;
   }
-  else if (internal::is_same<_StorageIndex,long>::value)
+  else if (internal::is_same<_StorageIndex,SuiteSparse_long>::value)
   {
     res.itype = CHOLMOD_LONG;
   }
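Reviewer sketch (not part of the patch): the branch above is what makes the index width visible to CHOLMOD. A minimal illustration, assuming Eigen is built with CHOLMOD support and `SuiteSparse_long` comes from cholmod.h:

#include <cassert>
#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>

// With SuiteSparse_long storage indices, viewAsCholmod tags the view
// CHOLMOD_LONG, so the cholmod_l_* entry points are the valid ones.
void itype_demo() {
  typedef Eigen::SparseMatrix<double, Eigen::ColMajor, SuiteSparse_long> SpMatL;
  SpMatL A(4, 4);
  A.setIdentity();
  cholmod_sparse view = Eigen::viewAsCholmod(Eigen::Ref<SpMatL>(A));
  assert(view.itype == CHOLMOD_LONG);  // int indices would give CHOLMOD_INT
}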
@@ -95,9 +95,9 @@ cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> >
 
   // setup res.xtype
   internal::cholmod_configure_matrix<_Scalar>::run(res);
 
   res.stype = 0;
 
   return res;
 }
@@ -121,7 +121,7 @@ template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>
 cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<const SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)
 {
   cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.matrix().const_cast_derived()));
 
   if(UpLo==Upper) res.stype =  1;
   if(UpLo==Lower) res.stype = -1;
   // swap stype for rowmajor matrices (only works for real matrices)
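A short sketch of what stype buys here (illustrative, same assumptions as above): CHOLMOD then reads only the stored triangle of the view.

#include <cassert>
#include <Eigen/CholmodSupport>

// A Lower selfadjoint view maps to stype == -1, so CHOLMOD ignores anything
// stored above the diagonal of A.
void stype_demo(const Eigen::SparseMatrix<double>& A) {
  cholmod_sparse lower = Eigen::viewAsCholmod(A.selfadjointView<Eigen::Lower>());
  assert(lower.stype == -1);
}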
@@ -168,11 +168,11 @@ namespace internal {
 
 #define EIGEN_CHOLMOD_SPECIALIZE0(ret, name) \
   template<typename _StorageIndex> inline ret cm_ ## name (cholmod_common &Common) { return cholmod_ ## name (&Common); } \
-  template<> inline ret cm_ ## name<long> (cholmod_common &Common) { return cholmod_l_ ## name (&Common); }
+  template<> inline ret cm_ ## name<SuiteSparse_long> (cholmod_common &Common) { return cholmod_l_ ## name (&Common); }
 
 #define EIGEN_CHOLMOD_SPECIALIZE1(ret, name, t1, a1) \
   template<typename _StorageIndex> inline ret cm_ ## name (t1& a1, cholmod_common &Common) { return cholmod_ ## name (&a1, &Common); } \
-  template<> inline ret cm_ ## name<long> (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); }
+  template<> inline ret cm_ ## name<SuiteSparse_long> (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); }
 
 EIGEN_CHOLMOD_SPECIALIZE0(int, start)
 EIGEN_CHOLMOD_SPECIALIZE0(int, finish)
@@ -184,15 +184,15 @@ EIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A)
 EIGEN_CHOLMOD_SPECIALIZE1(cholmod_factor*, analyze, cholmod_sparse, A)
 
 template<typename _StorageIndex> inline cholmod_dense* cm_solve (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_solve (sys, &L, &B, &Common); }
-template<> inline cholmod_dense* cm_solve<long> (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); }
+template<> inline cholmod_dense* cm_solve<SuiteSparse_long> (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); }
 
 template<typename _StorageIndex> inline cholmod_sparse* cm_spsolve (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_spsolve (sys, &L, &B, &Common); }
-template<> inline cholmod_sparse* cm_spsolve<long> (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); }
+template<> inline cholmod_sparse* cm_spsolve<SuiteSparse_long> (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); }
 
 template<typename _StorageIndex>
 inline int cm_factorize_p (cholmod_sparse* A, double beta[2], _StorageIndex* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_factorize_p (A, beta, fset, fsize, L, &Common); }
 template<>
-inline int cm_factorize_p<long> (cholmod_sparse* A, double beta[2], long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); }
+inline int cm_factorize_p<SuiteSparse_long> (cholmod_sparse* A, double beta[2], SuiteSparse_long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); }
 
 #undef EIGEN_CHOLMOD_SPECIALIZE0
 #undef EIGEN_CHOLMOD_SPECIALIZE1
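For readers tracing the macros: one invocation now expands, roughly, to the following pair, so every cm_* helper resolves to cholmod_* for int indices and cholmod_l_* for SuiteSparse_long indices.

// Approximate expansion of EIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A):
template<typename _StorageIndex>
inline int cm_free_sparse(cholmod_sparse*& A, cholmod_common &Common)
{ return cholmod_free_sparse(&A, &Common); }

template<>
inline int cm_free_sparse<SuiteSparse_long>(cholmod_sparse*& A, cholmod_common &Common)
{ return cholmod_l_free_sparse(&A, &Common); }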
@@ -254,10 +254,10 @@ class CholmodBase : public SparseSolverBase<Derived>
       internal::cm_free_factor<StorageIndex>(m_cholmodFactor, m_cholmod);
       internal::cm_finish<StorageIndex>(m_cholmod);
     }
 
     inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
     inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
 
     /** \brief Reports whether previous computation was successful.
      *
      * \returns \c Success if computation was successful,
@@ -276,11 +276,11 @@ class CholmodBase : public SparseSolverBase<Derived>
       factorize(matrix);
       return derived();
     }
 
     /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
      *
      * This function is particularly useful when solving for several problems having the same structure.
      *
      * \sa factorize()
      */
     void analyzePattern(const MatrixType& matrix)
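A usage sketch of the pattern this doc comment describes (the solver type is chosen for illustration):

#include <Eigen/CholmodSupport>

// Pay for the symbolic analysis once, then refactorize matrices that share
// the sparsity pattern of the first one.
void refactor_demo(const Eigen::SparseMatrix<double>& A0,
                   const Eigen::SparseMatrix<double>& A1) {
  Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double> > solver;
  solver.analyzePattern(A0);  // symbolic step, done once
  solver.factorize(A0);       // numeric step for the first matrix
  solver.factorize(A1);       // reuse: A1 must have the same pattern as A0
}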
@@ -292,13 +292,13 @@ class CholmodBase : public SparseSolverBase<Derived>
       }
       cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
       m_cholmodFactor = internal::cm_analyze<StorageIndex>(A, m_cholmod);
 
       this->m_isInitialized = true;
       this->m_info = Success;
       m_analysisIsOk = true;
       m_factorizationIsOk = false;
     }
 
     /** Performs a numeric decomposition of \a matrix
      *
      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
@@ -315,11 +315,11 @@ class CholmodBase : public SparseSolverBase<Derived>
       this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue);
       m_factorizationIsOk = true;
     }
 
     /** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations.
      *  See the Cholmod user guide for details. */
     cholmod_common& cholmod() { return m_cholmod; }
 
     #ifndef EIGEN_PARSED_BY_DOXYGEN
     /** \internal */
     template<typename Rhs,typename Dest>
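Sketch of what the cholmod() accessor is for; the field names below are plain CHOLMOD (see its user guide), not anything added by this patch:

#include <Eigen/CholmodSupport>

// Tune CHOLMOD before compute(): e.g. force a single AMD ordering attempt.
void tune_demo(const Eigen::SparseMatrix<double>& A) {
  Eigen::CholmodDecomposition<Eigen::SparseMatrix<double> > solver;
  solver.cholmod().nmethods = 1;
  solver.cholmod().method[0].ordering = CHOLMOD_AMD;
  solver.compute(A);
}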
@@ -329,7 +329,7 @@ class CholmodBase : public SparseSolverBase<Derived>
       const Index size = m_cholmodFactor->n;
       EIGEN_UNUSED_VARIABLE(size);
       eigen_assert(size==b.rows());
 
       // Cholmod needs column-major storage without inner-stride, which corresponds to the default behavior of Ref.
       Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b.derived());
 
@@ -345,7 +345,7 @@ class CholmodBase : public SparseSolverBase<Derived>
       dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
       internal::cm_free_dense<StorageIndex>(x_cd, m_cholmod);
     }
 
     /** \internal */
     template<typename RhsDerived, typename DestDerived>
     void _solve_impl(const SparseMatrixBase<RhsDerived> &b, SparseMatrixBase<DestDerived> &dest) const
@@ -370,8 +370,8 @@ class CholmodBase : public SparseSolverBase<Derived>
       internal::cm_free_sparse<StorageIndex>(x_cs, m_cholmod);
     }
     #endif // EIGEN_PARSED_BY_DOXYGEN
 
 
     /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization.
      *
      * During the numerical factorization, an offset term is added to the diagonal coefficients:\n
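Usage sketch for the shift described above: the factorization then effectively works on A + offset*I rather than A.

#include <Eigen/CholmodSupport>

// A small positive shift can make a nearly singular SPD matrix factorizable.
void shift_demo(const Eigen::SparseMatrix<double>& A) {
  Eigen::CholmodSimplicialLDLT<Eigen::SparseMatrix<double> > solver;
  solver.setShift(1e-8);
  solver.compute(A);
}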
@@ -386,7 +386,7 @@ class CholmodBase : public SparseSolverBase<Derived>
       m_shiftOffset[0] = double(offset);
       return derived();
     }
 
     /** \returns the determinant of the underlying matrix from the current factorization */
     Scalar determinant() const
     {
@@ -441,7 +441,7 @@ class CholmodBase : public SparseSolverBase<Derived>
     template<typename Stream>
     void dumpMemory(Stream& /*s*/)
     {}
 
   protected:
     mutable cholmod_common m_cholmod;
     cholmod_factor* m_cholmodFactor;
@@ -478,11 +478,11 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
 {
     typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base;
     using Base::m_cholmod;
 
   public:
 
     typedef _MatrixType MatrixType;
 
     CholmodSimplicialLLT() : Base() { init(); }
 
     CholmodSimplicialLLT(const MatrixType& matrix) : Base()
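End-to-end usage sketch for the solver classes declared here (shown for CholmodSimplicialLLT; the LDLT, supernodal, and auto variants are drop-in replacements):

#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>

Eigen::VectorXd solve_spd(const Eigen::SparseMatrix<double>& A,
                          const Eigen::VectorXd& b) {
  Eigen::CholmodSimplicialLLT<Eigen::SparseMatrix<double>, Eigen::Lower> solver(A);
  if (solver.info() != Eigen::Success)
    return Eigen::VectorXd();  // not SPD, or factorization failed
  return solver.solve(b);
}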
@@ -529,11 +529,11 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
 {
     typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base;
     using Base::m_cholmod;
 
   public:
 
     typedef _MatrixType MatrixType;
 
     CholmodSimplicialLDLT() : Base() { init(); }
 
     CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
@@ -578,11 +578,11 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
 {
     typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base;
     using Base::m_cholmod;
 
   public:
 
     typedef _MatrixType MatrixType;
 
     CholmodSupernodalLLT() : Base() { init(); }
 
     CholmodSupernodalLLT(const MatrixType& matrix) : Base()
@@ -629,11 +629,11 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
 {
     typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base;
     using Base::m_cholmod;
 
   public:
 
     typedef _MatrixType MatrixType;
 
     CholmodDecomposition() : Base() { init(); }
 
     CholmodDecomposition(const MatrixType& matrix) : Base()
@@ -643,7 +643,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
     }
 
     ~CholmodDecomposition() {}
 
     void setMode(CholmodMode mode)
     {
       switch(mode)
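Usage sketch: unlike the fixed-flavor classes above, CholmodDecomposition picks the factorization kind at runtime via setMode.

#include <Eigen/CholmodSupport>

void mode_demo(const Eigen::SparseMatrix<double>& A) {
  Eigen::CholmodDecomposition<Eigen::SparseMatrix<double> > solver;
  // Other CholmodMode values: CholmodAuto, CholmodSimplicialLLt, CholmodLDLt.
  solver.setMode(Eigen::CholmodSupernodalLLt);
  solver.compute(A);
}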
@@ -405,7 +405,7 @@ template<typename T> struct plain_matrix_type_row_major
   typedef Matrix<typename traits<T>::Scalar,
                 Rows,
                 Cols,
-                (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
+                (MaxCols==1&&MaxRows!=1) ? ColMajor : RowMajor,
                 MaxRows,
                 MaxCols
   > type;
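Why the operand swap is the fix: Eigen rejects row-major column vectors, so the one case where MaxCols==1 and MaxRows!=1 must come out ColMajor, and everything else stays RowMajor in this row-major helper. A standalone check of the corrected ternary:

#include <Eigen/Core>

// Reproduces the corrected storage-order choice outside of the trait.
constexpr int pick(int MaxRows, int MaxCols) {
  return (MaxCols == 1 && MaxRows != 1) ? Eigen::ColMajor : Eigen::RowMajor;
}
static_assert(pick(4, 1) == Eigen::ColMajor, "column vector stays ColMajor");
static_assert(pick(1, 4) == Eigen::RowMajor, "row vector is RowMajor");
static_assert(pick(3, 3) == Eigen::RowMajor, "general case is RowMajor here");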
@@ -73,7 +73,7 @@ struct TensorOpResourceRequirements {
   // expression tree (like reductions) to communicate resources
   // requirements based on local state (like the total number of reductions
   // to be computed).
-  TensorOpResourceRequirements(internal::TensorBlockShapeType shape,
+  TensorOpResourceRequirements(TensorBlockShapeType shape,
                                const Index size)
       : block_shape(shape), block_total_size(size) {}
 };
@@ -90,9 +90,9 @@ EIGEN_STRONG_INLINE void MergeResourceRequirements(
   *block_shape = resources[0].block_shape;
   *block_total_size = resources[0].block_total_size;
   for (std::vector<TensorOpResourceRequirements>::size_type i = 1; i < resources.size(); ++i) {
-    if (resources[i].block_shape == TensorBlockShapeType::kSkewedInnerDims &&
-        *block_shape != TensorBlockShapeType::kSkewedInnerDims) {
-      *block_shape = TensorBlockShapeType::kSkewedInnerDims;
+    if (resources[i].block_shape == kSkewedInnerDims &&
+        *block_shape != kSkewedInnerDims) {
+      *block_shape = kSkewedInnerDims;
     }
     *block_total_size =
         numext::maxi(*block_total_size, resources[i].block_total_size);
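The merge policy in plain form (a standalone sketch with simplified names, mirroring the loop above): any requester asking for skewed-inner-dims blocks wins, and the block budgets combine by taking the maximum.

#include <algorithm>
#include <cstddef>
#include <vector>

enum BlockShape { kUniformAllDims, kSkewedInnerDims };
struct Req { BlockShape shape; int size; };

void merge(const std::vector<Req>& reqs, BlockShape* shape, int* size) {
  *shape = reqs[0].shape;
  *size  = reqs[0].size;
  for (std::size_t i = 1; i < reqs.size(); ++i) {
    if (reqs[i].shape == kSkewedInnerDims) *shape = kSkewedInnerDims;
    *size = std::max(*size, reqs[i].size);
  }
}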
@@ -178,9 +178,9 @@ template <typename Scalar, typename StorageIndex, int NumDims, int Layout,
           bool BlockRead>
 class TensorBlockIO {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
-  typedef typename internal::TensorBlockCopyOp<Scalar, StorageIndex>
+  typedef typename TensorBlockCopyOp<Scalar, StorageIndex>
       TensorBlockCopyOp;
 
 protected:
@@ -320,7 +320,7 @@ template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
 class TensorBlockReader : public TensorBlockIO<Scalar, StorageIndex, NumDims,
                                                Layout, /*BlockRead=*/true> {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
   typedef TensorBlockIO<Scalar, StorageIndex, NumDims, Layout, /*BlockRead=*/true>
       Base;
@@ -357,7 +357,7 @@ template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
 class TensorBlockWriter : public TensorBlockIO<Scalar, StorageIndex, NumDims,
                                                Layout, /*BlockRead=*/false> {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
   typedef TensorBlockIO<Scalar, StorageIndex, NumDims, Layout, /*BlockRead=*/false>
       Base;
@@ -434,7 +434,7 @@ struct TensorBlockCwiseBinaryOp {
 template <typename BinaryFunctor, typename StorageIndex, typename OutputScalar,
           int NumDims, int Layout>
 struct TensorBlockCwiseBinaryIO {
-  typedef typename internal::TensorBlock<OutputScalar, StorageIndex, NumDims,
+  typedef typename TensorBlock<OutputScalar, StorageIndex, NumDims,
                                Layout>::Dimensions Dimensions;
 
   struct BlockIteratorState {
@@ -627,7 +627,7 @@ struct TensorBlockView {
 template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
 class TensorBlockMapper {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
   typedef DSizes<StorageIndex, NumDims> Dimensions;
 
@@ -742,7 +742,7 @@ class TensorBlockMapper {
         block_dim_sizes[i] = 1;
       }
     } else if (block_dim_sizes.TotalSize() > min_target_size) {
-      if (block_shape == TensorBlockShapeType::kUniformAllDims) {
+      if (block_shape == kUniformAllDims) {
         // Tensor will not fit within 'min_target_size' budget: calculate tensor
         // block dimension sizes based on "square" dimension size target.
         const size_t dim_size_target = static_cast<const size_t>(
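The expression truncated above computes a per-dimension "square" target; a sketch of the same idea, under the assumption that the budget is split evenly across dimensions:

#include <cmath>

// kUniformAllDims: spread the coefficient budget evenly, e.g. a budget of
// 1000 coefficients over 3 dims targets blocks of about 10 x 10 x 10.
int uniform_dim_target(int min_target_size, int num_dims) {
  return static_cast<int>(
      std::pow(static_cast<double>(min_target_size), 1.0 / num_dims));
}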
@@ -773,7 +773,7 @@ class TensorBlockMapper {
           total_size = total_size_other_dims * block_dim_sizes[dim];
         }
       }
-    } else if (block_shape == TensorBlockShapeType::kSkewedInnerDims) {
+    } else if (block_shape == kSkewedInnerDims) {
       StorageIndex coeff_to_allocate = min_target_size;
       for (int i = 0; i < NumDims; ++i) {
         const int dim = cond<Layout>()(i, NumDims - i - 1);
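Worked example of the skewed policy: for a 100x100 column-major tensor with a budget of 300 coefficients, the inner dimension takes its full extent of 100 and the leftover budget gives the next dimension a size of 3, so blocks come out 100x3, where the uniform policy would have produced roughly 17x17.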
@@ -818,7 +818,7 @@ class TensorBlockMapper {
 template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
 class TensorSliceBlockMapper {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
   typedef DSizes<StorageIndex, NumDims> Dimensions;
 
@@ -155,7 +155,7 @@ struct TensorContractionParams {
 // See expected implementation in NoOpOutputKernel.
 struct OutputKernel {
   template <typename Index, typename Scalar>
-  using OutputMapper = internal::blas_data_mapper<Scalar, Index, ColMajor>;
+  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
 };
 
 // Output kernel that does absolutely nothing.
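For orientation, a hedged sketch of a custom kernel in the shape this struct documents; the operator() signature follows NoOpOutputKernel as of this revision and should be treated as illustrative, not as a stable API:

#include <unsupported/Eigen/CXX11/Tensor>

// Clamp each finished output block at zero (a ReLU fused into the contraction).
struct ReluOutputKernel {
  template <typename Index, typename Scalar>
  void operator()(
      const Eigen::internal::blas_data_mapper<Scalar, Index, Eigen::ColMajor>& output_mapper,
      const Eigen::TensorContractionParams& /*params*/, Index /*i*/, Index /*j*/,
      Index num_rows, Index num_cols) const {
    for (Index c = 0; c < num_cols; ++c)
      for (Index r = 0; r < num_rows; ++r)
        if (output_mapper(r, c) < Scalar(0)) output_mapper(r, c) = Scalar(0);
  }
};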